//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Transforms/Utils/Local.h"
#include <sstream>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

static int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
  return std::min(High, std::max(Low, Value));
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             unsigned AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
}
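// For example (a sketch; exact IR depends on target and options), with
// -ftrivial-auto-var-init=zero a call such as
//   void *p = __builtin_alloca(n);
// is emitted roughly as:
//   %p = alloca i8, i64 %n, align 16
//   call void @llvm.memset.p0i8.i64(i8* align 16 %p, i8 0, i64 %n, i1 false)
// With =pattern, the memset byte comes from initializationPatternFor (e.g.
// 0xAA on many targets) instead of 0x00.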

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.getName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
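// For instance, a call to __builtin_fabsf resolves here to the library
// function "fabsf": getName() returns "__builtin_fabsf", and the "+ 10"
// skips the 10-character "__builtin_" prefix. (A sketch of the mapping;
// the AsmLabelAttr path instead uses whatever label the declaration gave.)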

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));

  llvm::Value *Args[2];
  Args[0] = DestPtr;
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  QualType ArgTy = E->getArg(1)->getType();
  if (ArgTy->isBooleanType())
    Args[1] = CGF.EmitToMemory(Args[1], E->getArg(1)->getType());

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], Ordering);
  if (ArgTy->isBooleanType())
    Result = CGF.EmitFromMemory(Result, ArgTy);
  return Result;
}
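// For instance (roughly; the ordering defaults to seq_cst here),
//   int old = __sync_fetch_and_add(&x, 5);
// is lowered via MakeBinaryAtomicValue(CGF, AtomicRMWInst::Add, E) to:
//   %old = atomicrmw add i32* %x, i32 5 seq_cst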

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E,
                                   unsigned AddrSpace) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  // Convert the type of the pointer to a pointer to the stored type.
  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  Value *BC = CGF.Builder.CreateBitCast(
      Address, llvm::PointerType::get(Val->getType(), AddrSpace), "cast");
  LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
      llvm::IntegerType::get(CGF.getLLVMContext(),
                             CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                     llvm::ConstantInt::get(IntType, -1));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
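// For example (a sketch), __sync_add_and_fetch(&x, 5) returns the *new*
// value, so it becomes an atomicrmw followed by a redo of the operation:
//   %old = atomicrmw add i32* %x, i32 5 seq_cst
//   %new = add i32 %old, 5
// __sync_nand_and_fetch passes Op=And and Invert=true, so the recomputed
// result is flipped with an xor -1 to produce ~(old & val).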

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange*
/// intrinsics, invoke the function EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));

  Value *Args[3];
  Args[0] = DestPtr;
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  Args[2] = CGF.EmitScalarExpr(E->getArg(2));
  if (E->getArg(1)->getType()->isBooleanType())
    Args[1] = CGF.EmitToMemory(Args[1], E->getArg(1)->getType());
  if (E->getArg(2)->getType()->isBooleanType())
    Args[2] = CGF.EmitToMemory(Args[2], E->getArg(2)->getType());

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return CGF.EmitFromMemory(CGF.Builder.CreateExtractValue(Pair, 0), T);
}
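// A rough sketch of the two modes, for __sync_*_compare_and_swap(&x, c, n):
//   %pair = cmpxchg i32* %x, i32 %c, i32 %n seq_cst seq_cst
//   ; ReturnBool=false (__sync_val_compare_and_swap):
//   %old = extractvalue { i32, i1 } %pair, 0
//   ; ReturnBool=true (__sync_bool_compare_and_swap):
//   %ok = extractvalue { i32, i1 } %pair, 1
//   %res = zext i1 %ok to i32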

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics, which have the following
/// signature:
///   T _InterlockedCompareExchange(T volatile *Destination,
///                                 T Exchange,
///                                 T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
///   cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above
/// utility function MakeAtomicCmpXchgValue, since it expects the arguments
/// to be already swapped.
static Value *EmitAtomicCmpXchgForMSIntrin(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
      Destination, Comparand, Exchange, SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}
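// For example (sketch), _InterlockedCompareExchange(&x, Exch, Comp) emits:
//   %pair = cmpxchg volatile i32* %x, i32 %Comp, i32 %Exch seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
// Note the operand order: cmpxchg takes the comparand before the exchange
// value, the reverse of the intrinsic's argument order.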

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Add, CGF.EmitScalarExpr(E->getArg(0)),
      ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub, CGF.EmitScalarExpr(E->getArg(0)),
      ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
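// _InterlockedIncrement and _InterlockedDecrement return the *new* value,
// so (roughly) _InterlockedIncrement(&x) becomes:
//   %old = atomicrmw add i32* %x, i32 1 seq_cst
//   %new = add i32 %old, 1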

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  unsigned DefaultAS = CGF.CGM.getTargetCodeGenInfo().getDefaultAS();
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo(DefaultAS));
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
  unsigned DefaultAS = CGF.CGM.getTargetCodeGenInfo().getDefaultAS();
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo(DefaultAS));
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}
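// These back MSVC's __iso_volatile_* builtins; e.g. __iso_volatile_load32(p)
// is emitted as (approximately):
//   %v = load volatile i32, i32* %p, align 4
// so the access is a plain volatile load/store of exactly the named width.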

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}

// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has an overloaded integer result and an fp operand.
static Value *emitFPToIntRoundBuiltin(CodeGenFunction &CGF,
                                      const CallExpr *E,
                                      unsigned IntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID,
                                     {ResultType, Src0->getType()});
  return CGF.Builder.CreateCall(F, Src0);
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-endian, the high bits in big-endian. Therefore, on big-endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating the value in order to extract the higher-order
    // double, which we will use to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
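// For an ordinary IEEE double this reduces to (sketch):
//   %bits = bitcast double %v to i64
//   %sign = icmp slt i64 %bits, 0
// i.e. signbit(v) asks whether the bit pattern is negative as a signed int.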

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Value *calleeValue) {
  CGCallee callee = CGCallee(GlobalDecl(FD), calleeValue);
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
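// E.g. for llvm.uadd.with.overflow.i32 this emits (roughly):
//   %t     = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
//   %carry = extractvalue { i32, i1 } %t, 1
//   %sum   = extractvalue { i32, i1 } %t, 0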

static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  return Call;
}

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
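// Worked example: for {unsigned int (32, unsigned), short (16, signed)} the
// result is signed, and the unsigned 32-bit member forces Width = 32 + 1, so
// the encompassing type is a 33-bit signed integer (wide enough for every
// unsigned 32-bit value and every signed 16-bit value).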

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  unsigned AS = CGM.getTargetCodeGenInfo().getDefaultAS();
  llvm::Type *DestType = llvm::PointerType::get(Int8Ty, AS);
  if (ArgValue->getType() != DestType)
    ArgValue = Builder.CreatePointerBitCastOrAddrSpaceCast(
        ArgValue, DestType, ArgValue->getName().data());

  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst, DestType), ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - An llvm::Argument (if E is a param with the pass_object_size attribute
///     on it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size
  // shouldn't evaluate E for side-effects. In either case, we shouldn't lower
  // to @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports types 0 and 2, so pass the min/max choice along as a
  // boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
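// For example (a sketch), __builtin_object_size(p, 2) that can't be folded
// becomes:
//   %sz = call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true, i1 true,
//                                            i1 false)
// where the i1 flags are (min, nullunknown, dynamic); type 2 requests the
// minimum estimate, hence min=true.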

namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace

BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
  // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

  // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

  // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}

static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly:   return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset:      return 'r';
  case BitTest::Set:        return 's';
  }
  llvm_unreachable("invalid action");
}

static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)\n\tsetc ${0:b}";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "=r,r,r,~{cc},~{flags},~{fpsr}";
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::Type *IntPtrType = IntType->getPointerTo();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
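// E.g. _interlockedbittestandset(base, pos) on x86 produces the inline asm
// (roughly):
//   lock btsl $2, ($1)
//   setc ${0:b}
// i.e. an atomic bit-test-and-set whose carry flag becomes the i8 result.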

static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
  case BitTest::Release:    return llvm::AtomicOrdering::Release;
  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}

/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be
/// larger than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  llvm::Triple::ArchType Arch = CGF.getTarget().getTriple().getArch();
  if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64)
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to
  // form a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
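// On non-x86 targets, _bittestandset(base, pos) therefore expands to
// something like (a sketch, not literal IR):
//   %byteidx  = ashr i32 %pos, 3
//   %byteaddr = getelementptr inbounds i8, i8* %base, i32 %byteidx
//   %poslow   = and i8 trunc(%pos), 7
//   %mask     = shl i8 1, %poslow
//   %old      = load i8, i8* %byteaddr   ; or an atomicrmw or/and for the
//   store i8 or(%old, %mask), %byteaddr  ; interlocked variants
//   %res      = and i8 lshr(%old, %poslow), 1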

namespace {
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry,
                               {CGF.CGM.ProgramInt8PtrTy}));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress,
                               {CGF.CGM.ProgramInt8PtrTy}),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}
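// Roughly: the _setjmp3 form emits a varargs call "_setjmp3(buf, 0)" with a
// literal zero count, while _setjmp/_setjmpex pass the frame address (or the
// result of llvm.sponentry on AArch64) as the second argument; in every case
// the callee is marked returns_twice so later passes treat it like setjmp.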

// Many MSVC builtins are available on x64, ARM, and AArch64; to avoid
// repeating code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
  _BitScanForward,
  _BitScanReverse,
  _InterlockedAnd,
  _InterlockedDecrement,
  _InterlockedExchange,
  _InterlockedExchangeAdd,
  _InterlockedExchangeSub,
  _InterlockedIncrement,
  _InterlockedOr,
  _InterlockedXor,
  _InterlockedExchangeAdd_acq,
  _InterlockedExchangeAdd_rel,
  _InterlockedExchangeAdd_nf,
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
  _InterlockedCompareExchange_acq,
  _InterlockedCompareExchange_rel,
  _InterlockedCompareExchange_nf,
  _InterlockedOr_acq,
  _InterlockedOr_rel,
  _InterlockedOr_nf,
  _InterlockedXor_acq,
  _InterlockedXor_rel,
  _InterlockedXor_nf,
  _InterlockedAnd_acq,
  _InterlockedAnd_rel,
  _InterlockedAnd_nf,
  _InterlockedIncrement_acq,
  _InterlockedIncrement_rel,
  _InterlockedIncrement_nf,
  _InterlockedDecrement_acq,
  _InterlockedDecrement_rel,
  _InterlockedDecrement_nf,
  __fastfail,
};

Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
                                            const CallExpr *E) {
  switch (BuiltinID) {
  case MSVCIntrin::_BitScanForward:
  case MSVCIntrin::_BitScanReverse: {
    Value *ArgValue = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = ArgValue->getType();
    llvm::Type *IndexType =
        EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
    llvm::Type *ResultType = ConvertType(E->getType());

    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
    Value *ResZero = llvm::Constant::getNullValue(ResultType);
    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");

    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ResZero, Begin);

    Builder.SetInsertPoint(NotZero);
    Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));

    if (BuiltinID == MSVCIntrin::_BitScanForward) {
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Builder.CreateStore(ZeroCount, IndexAddress, false);
    } else {
      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);

      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
      Builder.CreateStore(Index, IndexAddress, false);
    }
    Builder.CreateBr(End);
    Result->addIncoming(ResOne, NotZero);

    Builder.SetInsertPoint(End);
    return Result;
  }
  case MSVCIntrin::_InterlockedAnd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
  case MSVCIntrin::_InterlockedExchange:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
  case MSVCIntrin::_InterlockedExchangeAdd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
  case MSVCIntrin::_InterlockedExchangeSub:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
  case MSVCIntrin::_InterlockedOr:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
  case MSVCIntrin::_InterlockedXor:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
  case MSVCIntrin::_InterlockedExchangeAdd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchangeAdd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchangeAdd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedExchange_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchange_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchange_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange_acq:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange_rel:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange_nf:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedOr_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedOr_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedOr_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedXor_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedXor_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedXor_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedAnd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedAnd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedAnd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedIncrement_acq:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedIncrement_rel:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedIncrement_nf:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedDecrement_acq:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedDecrement_rel:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedDecrement_nf:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);

  case MSVCIntrin::_InterlockedDecrement:
    return EmitAtomicDecrementValue(*this, E);
  case MSVCIntrin::_InterlockedIncrement:
    return EmitAtomicIncrementValue(*this, E);

  case MSVCIntrin::__fastfail: {
    // Request immediate process termination from the kernel. The instruction
    // sequences to do this are documented on MSDN:
    // https://msdn.microsoft.com/en-us/library/dn774154.aspx
    llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
    StringRef Asm, Constraints;
    switch (ISA) {
    default:
      ErrorUnsupported(E, "__fastfail call for this architecture");
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      Asm = "int $$0x29";
      Constraints = "{cx}";
      break;
    case llvm::Triple::thumb:
      Asm = "udf #251";
      Constraints = "{r0}";
      break;
    case llvm::Triple::aarch64:
      Asm = "brk #0xF003";
      Constraints = "{w0}";
    }
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  }
  llvm_unreachable("Incorrect MSVC intrinsic!");
}

namespace {
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {
  CallObjCArcUse(llvm::Value *object) : object(object) {}
  llvm::Value *object;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCIntrinsicUse(object);
  }
};
}

Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                 BuiltinCheckKind Kind) {
  assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
         "Unsupported builtin check kind");

  Value *ArgValue = EmitScalarExpr(E);
  if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
    return ArgValue;

  SanitizerScope SanScope(this);
  Value *Cond = Builder.CreateICmpNE(
      ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
  EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
            SanitizerHandler::InvalidBuiltin,
            {EmitCheckSourceLocation(E->getExprLoc()),
             llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
            None);
  return ArgValue;
}

/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
  QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
  return C.getCanonicalType(UnsignedTy);
}

llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
    const analyze_os_log::OSLogBufferLayout &Layout,
    CharUnits BufferAlignment) {
  ASTContext &Ctx = getContext();

  llvm::SmallString<64> Name;
  {
    raw_svector_ostream OS(Name);
    OS << "__os_log_helper";
    OS << "_" << BufferAlignment.getQuantity();
    OS << "_" << int(Layout.getSummaryByte());
    OS << "_" << int(Layout.getNumArgsByte());
    for (const auto &Item : Layout.Items)
      OS << "_" << int(Item.getSizeByte()) << "_"
         << int(Item.getDescriptorByte());
  }

  if (llvm::Function *F = CGM.getModule().getFunction(Name))
    return F;

  llvm::SmallVector<QualType, 4> ArgTys;
  FunctionArgList Args;
  Args.push_back(ImplicitParamDecl::Create(
      Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
      ImplicitParamDecl::Other));
  ArgTys.emplace_back(Ctx.VoidPtrTy);

  for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
    char Size = Layout.Items[I].getSizeByte();
    if (!Size)
      continue;

    QualType ArgTy = getOSLogArgType(Ctx, Size);
    Args.push_back(ImplicitParamDecl::Create(
        Ctx, nullptr, SourceLocation(),
        &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
        ImplicitParamDecl::Other));
    ArgTys.emplace_back(ArgTy);
  }

  QualType ReturnTy = Ctx.VoidTy;
  QualType FunctionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});

  // The helper function has linkonce_odr linkage to enable the linker to merge
  // identical functions. To ensure the merging always happens, 'noinline' is
  // attached to the function when compiling with -Oz.
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
  llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Fn = llvm::Function::Create(
      FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
  Fn->setDoesNotThrow();

  // Attach 'noinline' at -Oz.
  if (CGM.getCodeGenOpts().OptimizeSize == 2)
    Fn->addFnAttr(llvm::Attribute::NoInline);

  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  IdentifierInfo *II = &Ctx.Idents.get(Name);
  FunctionDecl *FD = FunctionDecl::Create(
      Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
      FunctionTy, nullptr, SC_PrivateExtern, false, false);

  StartFunction(FD, ReturnTy, Fn, FI, Args);

  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  CharUnits Offset;
  Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
                  BufferAlignment);
  Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
  Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));

  unsigned I = 1;
  for (const auto &Item : Layout.Items) {
    Builder.CreateStore(
        Builder.getInt8(Item.getDescriptorByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
    Builder.CreateStore(
        Builder.getInt8(Item.getSizeByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));

    CharUnits Size = Item.size();
    if (!Size.getQuantity())
      continue;

    Address Arg = GetAddrOfLocalVar(Args[I]);
    Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
    Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
                                 "argDataCast");
    Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
    Offset += Size;
    ++I;
  }

  FinishFunction();

  return Fn;
}
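// The mangled name encodes the buffer layout so identical helpers merge via
// linkonce_odr; e.g. a buffer with alignment 16, summary byte 0, and two
// 4-byte scalar arguments (descriptor byte 0) would yield a name like
//   __os_log_helper_16_0_2_4_0_4_0
// (a hypothetical example; the actual bytes depend on the format string).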

RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
  assert(E.getNumArgs() >= 2 &&
         "__builtin_os_log_format takes at least 2 arguments");
  ASTContext &Ctx = getContext();
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
  Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
  llvm::SmallVector<llvm::Value *, 4> RetainableOperands;

  // Ignore argument 1, the format string. It is not currently used.
  CallArgList Args;
  Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);

  for (const auto &Item : Layout.Items) {
    int Size = Item.getSizeByte();
    if (!Size)
      continue;

    llvm::Value *ArgVal;

    if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
      uint64_t Val = 0;
      for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
        Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
      ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
    } else if (const Expr *TheExpr = Item.getExpr()) {
      ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);

      // Check if this is a retainable type.
      if (TheExpr->getType()->isObjCRetainableType()) {
        assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
               "Only scalar can be an ObjC retainable type");
        // Check if the object is constant; if not, save it in
        // RetainableOperands.
        if (!isa<Constant>(ArgVal))
          RetainableOperands.push_back(ArgVal);
      }
    } else {
      ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
    }

    unsigned ArgValSize =
        CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
                                                     ArgValSize);
    ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
    CanQualType ArgTy = getOSLogArgType(Ctx, Size);
    // If ArgVal has type x86_fp80, zero-extend ArgVal.
    ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
    Args.add(RValue::get(ArgVal), ArgTy);
  }

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
  llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
      Layout, BufAddr.getAlignment());
  EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);

  // Push a clang.arc.use cleanup for each object in RetainableOperands. The
  // cleanup will cause the use to appear after the final log call, keeping
  // the object valid while it's held in the log buffer. Note that if there's
  // a release cleanup on the object, it will already be active; since
  // cleanups are emitted in reverse order, the use will occur before the
  // object is released.
  if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
      CGM.getCodeGenOpts().OptimizationLevel != 0)
    for (llvm::Value *Object : RetainableOperands)
      pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), Object);

  return RValue::get(BufAddr.getPointer());
}

/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
                                       WidthAndSignedness Op1Info,
                                       WidthAndSignedness Op2Info,
                                       WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
         Op1Info.Signed != Op2Info.Signed;
}
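// E.g. __builtin_mul_overflow(int, unsigned, int *) qualifies: the operands
// have mixed signedness and the wider operand is at least as wide as the
// result, so the cheaper specialization below applies instead of the generic
// checked-binop path.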

/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
                             WidthAndSignedness Op1Info, const clang::Expr *Op2,
                             WidthAndSignedness Op2Info,
                             const clang::Expr *ResultArg, QualType ResultQTy,
                             WidthAndSignedness ResultInfo) {
  assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
                                    Op2Info, ResultInfo) &&
         "Not a mixed-sign multiplication we can specialize");

  // Emit the signed and unsigned operands.
  const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
  const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
  llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
  llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
  unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
  unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;

  // One of the operands may be smaller than the other. If so, [s|z]ext it.
  if (SignedOpWidth < UnsignedOpWidth)
    Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
  if (UnsignedOpWidth < SignedOpWidth)
    Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");

  llvm::Type *OpTy = Signed->getType();
  llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  llvm::Type *ResTy = ResultPtr.getElementType();
  unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);

  // Take the absolute value of the signed operand.
  llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
  llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
  llvm::Value *AbsSigned =
      CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);

  // Perform a checked unsigned multiplication.
  llvm::Value *UnsignedOverflow;
  llvm::Value *UnsignedResult =
      EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
                            Unsigned, UnsignedOverflow);

  llvm::Value *Overflow, *Result;
  if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or less
    // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
    auto IntMax =
        llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
    llvm::Value *MaxResult =
        CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
                              CGF.Builder.CreateZExt(IsNegative, OpTy));
    llvm::Value *SignedOverflow =
        CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);

    // Prepare the signed result (possibly by negating it).
    llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
    llvm::Value *SignedResult =
        CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
    Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
  } else {
    // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
    llvm::Value *Underflow = CGF.Builder.CreateAnd(
        IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
    if (ResultInfo.Width < OpWidth) {
      auto IntMax =
          llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
      llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
          UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
      Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
    }

    // Negate the product if it would be negative in infinite precision.
    Result = CGF.Builder.CreateSelect(
        IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);

    Result = CGF.Builder.CreateTrunc(Result, ResTy);
  }
  assert(Overflow && Result && "Missing overflow or result");

  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(Overflow);
}

static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
                               Value *&RecordPtr, CharUnits Align,
                               llvm::FunctionCallee Func, int Lvl) {
  const auto *RT = RType->getAs<RecordType>();
  ASTContext &Context = CGF.getContext();
  RecordDecl *RD = RT->getDecl()->getDefinition();
  std::string Pad = std::string(Lvl * 4, ' ');
  unsigned GlobalAS = CGF.CGM.getDataLayout().getGlobalsAddressSpace();

  Value *GString = CGF.Builder.CreateGlobalStringPtr(
      RType.getAsString() + " {\n", "", GlobalAS);
  Value *Res = CGF.Builder.CreateCall(Func, {GString});

  static llvm::DenseMap<QualType, const char *> Types;
  if (Types.empty()) {
    Types[Context.CharTy] = "%c";
    Types[Context.BoolTy] = "%d";
    Types[Context.SignedCharTy] = "%hhd";
    Types[Context.UnsignedCharTy] = "%hhu";
    Types[Context.IntTy] = "%d";
    Types[Context.UnsignedIntTy] = "%u";
    Types[Context.LongTy] = "%ld";
    Types[Context.UnsignedLongTy] = "%lu";
    Types[Context.LongLongTy] = "%lld";
    Types[Context.UnsignedLongLongTy] = "%llu";
    Types[Context.ShortTy] = "%hd";
    Types[Context.UnsignedShortTy] = "%hu";
    Types[Context.VoidPtrTy] = "%p";
    Types[Context.FloatTy] = "%f";
    Types[Context.DoubleTy] = "%f";
    Types[Context.LongDoubleTy] = "%Lf";
    Types[Context.getPointerType(Context.CharTy)] = "%s";
    Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
  }

  for (const auto *FD : RD->fields()) {
    Value *FieldPtr = RecordPtr;
    if (RD->isUnion())
      FieldPtr = CGF.Builder.CreatePointerCast(
          FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
    else
      FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
                                             FD->getFieldIndex());

    GString = CGF.Builder.CreateGlobalStringPtr(
        llvm::Twine(Pad)
            .concat(FD->getType().getAsString())
            .concat(llvm::Twine(' '))
            .concat(FD->getNameAsString())
            .concat(" : ")
            .str(),
        "", GlobalAS);
    Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);

    QualType CanonicalType =
        FD->getType().getUnqualifiedType().getCanonicalType();

    // If the field is itself a record, dump it recursively.
    if (CanonicalType->isRecordType()) {
      Value *TmpRes =
          dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
      Res = CGF.Builder.CreateAdd(TmpRes, Res);
      continue;
    }

    // Otherwise pick the best format specifier for the field, falling back
    // to %p for types without a known specifier.
    llvm::Twine Format = Types.find(CanonicalType) == Types.end()
                             ? Types[Context.VoidPtrTy]
                             : Types[CanonicalType];

    Address FieldAddress = Address(FieldPtr, Align);
    FieldPtr = CGF.Builder.CreateLoad(FieldAddress);

    // FIXME: Need to handle bitfields here.
    GString = CGF.Builder.CreateGlobalStringPtr(
        Format.concat(llvm::Twine('\n')).str(), "", GlobalAS);
    TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);
  }

  GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n", "", GlobalAS);
  Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
  Res = CGF.Builder.CreateAdd(Res, TmpRes);
  return Res;
}

static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
                              llvm::SmallPtrSetImpl<const Decl *> &Seen) {
  if (const auto *Arr = Ctx.getAsArrayType(Ty))
    Ty = Ctx.getBaseElementType(Arr);

  const auto *Record = Ty->getAsCXXRecordDecl();
  if (!Record)
    return false;

  // We've already checked this type, or are in the process of checking it.
  if (!Seen.insert(Record).second)
    return false;

  assert(Record->hasDefinition() &&
         "Incomplete types should already be diagnosed");

  if (Record->isDynamicClass())
    return true;

  for (FieldDecl *F : Record->fields()) {
    if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
      return true;
  }
  return false;
}

/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
  if (!CGM.getCodeGenOpts().StrictVTablePointers)
    return false;
  llvm::SmallPtrSet<const Decl *, 16> Seen;
  return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
}

RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
  llvm::Value *Src = EmitScalarExpr(E->getArg(0));
  llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));

  // The builtin's shift arg may have a different type than the source arg and
  // result, but the LLVM intrinsic uses the same type for all values.
  llvm::Type *Ty = Src->getType();
  ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);

  // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
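  // E.g. __builtin_rotateleft8(0x81, 1) becomes llvm.fshl.i8(0x81, 0x81, 1):
  // the value concatenated with itself and shifted left by one, giving 0x03.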
  unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGM.getIntrinsic(IID, Ty);
  return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}

// Diagnose misaligned copies (memmove/memcpy) where the source type contains
// capabilities but the destination buffer is less than capability aligned.
// Such copies can result in tags being lost at runtime if the buffer is not
// actually capability aligned. Conversely, if the user adds a
// __builtin_assume_aligned() or a cast to a capability type, we can assume it
// is capability aligned and use csc/clc if the memcpy()/memmove() is expanded
// inline.
// TODO: maybe there needs to be an attribute __memmove_like__ or similar to
// indicate that a function behaves like memmove/memcpy and we can use that
// to diagnose unaligned copies.
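// Example of a copy this diagnoses (a sketch, assuming a purecap CHERI target
// where pointers are capability-sized):
//   struct S { void *Ptr; };           // contains a capability in purecap
//   char Buf[sizeof(struct S)];        // only byte-aligned
//   memcpy(Buf, &Val, sizeof(Val));    // Val is a struct S; tags may be lost
//                                      // if lowered to 1/2/4/8-byte accesses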
static void
diagnoseMisalignedCapabiliyCopyDest(CodeGenFunction &CGF, StringRef Function,
                                    const Expr *Src, const CharUnits DstAlignCU,
                                    AnyMemTransferInst *MemInst = nullptr) {
  // We want the real type, not the implicit conversion to void*.
  // TODO: ignore the first explicit cast to void*?
  auto UnderlyingSrcTy = Src->IgnoreParenImpCasts()->getType();
  // The pointer will always be a capability in the purecap ABI; we only care
  // about the pointee type (i.e. the type that is being copied).
  UnderlyingSrcTy =
      QualType(UnderlyingSrcTy->getPointeeOrArrayElementType(), 0);
  auto &Ctx = CGF.CGM.getContext();
  if (!Ctx.containsCapabilities(UnderlyingSrcTy))
    return;

  // Add a "must-preserve-cheri-tags" attribute to the memcpy/memmove
  // intrinsic to ensure that the backend will not lower it to an inlined
  // sequence of 1/2/4/8 byte loads and stores which would strip the tag bits.
  // TODO: a clc/csc that works on unaligned data but traps for a csc
  // with a tagged value and unaligned address could also prevent tags
  // from being lost.
  if (MemInst) {
    // If we have a memory intrinsic let the backend diagnose this issue:
    // First, tell the backend that this copy must preserve tags.
    MemInst->addAttribute(
        llvm::AttributeList::FunctionIndex,
        llvm::Attribute::get(CGF.getLLVMContext(), "must-preserve-cheri-tags"));
    // And also tell it what the underlying type was for improved diagnostics.
    std::string TypeName = UnderlyingSrcTy.getAsString();
    std::string CanonicalStr = UnderlyingSrcTy.getCanonicalType().getAsString();
    if (CanonicalStr != TypeName)
      TypeName = "'" + TypeName + "' (aka '" + CanonicalStr + "')";
    else
      TypeName = "'" + TypeName + "'";
    MemInst->addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::get(CGF.getLLVMContext(),
                                               "frontend-memtransfer-type",
                                               TypeName));
    return;
  }
  // Otherwise attempt to diagnose it here (likely to cause false positives).
  uint64_t CapSizeBytes =
      Ctx.toCharUnitsFromBits(Ctx.getTargetInfo().getCHERICapabilityAlign())
          .getQuantity();
  uint64_t DstAlignBytes = DstAlignCU.getQuantity();
  bool UnderAligned = DstAlignBytes < CapSizeBytes;
  if (UnderAligned && MemInst) {
    // NB: this block is currently unreachable since we return early above
    // whenever MemInst is non-null; it is kept for when that early return is
    // removed.
    // See if __builtin_assume_aligned() was used to increase the alignment.
    // In order to compute this alignment we need to use getKnownAlignment().
    // This will parse the @llvm.assume() intrinsics and compute the new
    // alignment, but in order to do so it needs an AssumptionCache (and
    // possibly a DominatorTree, but for now it seems to work without).
    // We also need to pass the call instruction as the context since the
    // alignment cannot be computed otherwise.

    assert(MemInst->getDestAlignment() == DstAlignBytes);
    // We need an assumption cache for getKnownAlignment(). This may be
    // expensive so we only do it if the alignment check failed.
    AssumptionCache AC(*CGF.CurFn);
    DominatorTree DT(*CGF.CurFn);
    auto KnownAlign = llvm::getKnownAlignment(
        MemInst->getRawDest(), CGF.CGM.getDataLayout(), MemInst, &AC, &DT);
    if (KnownAlign > DstAlignBytes) {
      // Check whether we are still underaligned after __builtin_assume_aligned()
      // and update the memcpy/memmove dest alignment. This will be done later
      // in LLVM anyway, but since we have already computed it we may as well
      // set it.
      DstAlignBytes = KnownAlign;
      UnderAligned = DstAlignBytes < CapSizeBytes;
      MemInst->setDestAlignment(DstAlignBytes);
    }
  }
  // TODO: should only really warn if the size is small enough to be inlined.
  if (UnderAligned) {
    // TODO: this warning should be emitted by the backend instead.
    CGF.CGM.getDiags().Report(Src->getExprLoc(),
                              diag::warn_cheri_memintrin_misaligned_inefficient)
        << Function << (unsigned)DstAlignBytes << UnderlyingSrcTy;
    // TODO: add a fixit?
    CGF.CGM.getDiags().Report(Src->getExprLoc(),
                              diag::note_cheri_memintrin_misaligned_fixit)
        << Function;
  }
}

static void diagnoseMisalignedCapabiliyCopyDest(CodeGenFunction &CGF,
                                                StringRef Function,
                                                const Expr *Src, CallInst *CI) {
  AnyMemTransferInst *MemInst = cast<AnyMemTransferInst>(CI);
  diagnoseMisalignedCapabiliyCopyDest(
      CGF, Function, Src, CharUnits::fromQuantity(MemInst->getDestAlignment()),
      MemInst);
}

static void diagnoseMisalignedCapabiliyCopyDest(CodeGenFunction &CGF,
                                                StringRef Function,
                                                const Expr *Src,
                                                const Expr *Dst) {
  auto UnderlyingDstTy = QualType(
      Dst->IgnoreImpCasts()->getType()->getPointeeOrArrayElementType(), 0);
  diagnoseMisalignedCapabiliyCopyDest(
      CGF, Function, Src, CGF.getNaturalTypeAlignment(UnderlyingDstTy));
}

RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                                        const CallExpr *E,
                                        ReturnValueSlot ReturnValue) {
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }
  unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();

  // There are LLVM math intrinsics/instructions corresponding to math library
  // functions except the LLVM op will never set errno while the math library
  // might. Also, math builtins have the same semantics as their math library
  // twins. Thus, we can transform math library and builtin calls to their
  // LLVM counterparts if the call is marked 'const' (known to never set errno).
  if (FD->hasAttr<ConstAttr>()) {
    switch (BuiltinID) {
    case Builtin::BIceil:
    case Builtin::BIceilf:
    case Builtin::BIceill:
    case Builtin::BI__builtin_ceil:
    case Builtin::BI__builtin_ceilf:
    case Builtin::BI__builtin_ceill:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::ceil));

    case Builtin::BIcopysign:
    case Builtin::BIcopysignf:
    case Builtin::BIcopysignl:
    case Builtin::BI__builtin_copysign:
    case Builtin::BI__builtin_copysignf:
    case Builtin::BI__builtin_copysignl:
    case Builtin::BI__builtin_copysignf128:
      return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));

    case Builtin::BIcos:
    case Builtin::BIcosf:
    case Builtin::BIcosl:
    case Builtin::BI__builtin_cos:
    case Builtin::BI__builtin_cosf:
    case Builtin::BI__builtin_cosl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::cos));

    case Builtin::BIexp:
    case Builtin::BIexpf:
    case Builtin::BIexpl:
    case Builtin::BI__builtin_exp:
    case Builtin::BI__builtin_expf:
    case Builtin::BI__builtin_expl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp));

    case Builtin::BIexp2:
    case Builtin::BIexp2f:
    case Builtin::BIexp2l:
    case Builtin::BI__builtin_exp2:
    case Builtin::BI__builtin_exp2f:
    case Builtin::BI__builtin_exp2l:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::exp2));

    case Builtin::BIfabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabsl:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_fabsf128:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));

    case Builtin::BIfloor:
    case Builtin::BIfloorf:
    case Builtin::BIfloorl:
    case Builtin::BI__builtin_floor:
    case Builtin::BI__builtin_floorf:
    case Builtin::BI__builtin_floorl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::floor));

    case Builtin::BIfma:
    case Builtin::BIfmaf:
    case Builtin::BIfmal:
    case Builtin::BI__builtin_fma:
    case Builtin::BI__builtin_fmaf:
    case Builtin::BI__builtin_fmal:
      return RValue::get(emitTernaryBuiltin(*this, E, Intrinsic::fma));

    case Builtin::BIfmax:
    case Builtin::BIfmaxf:
    case Builtin::BIfmaxl:
    case Builtin::BI__builtin_fmax:
    case Builtin::BI__builtin_fmaxf:
    case Builtin::BI__builtin_fmaxl:
      return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::maxnum));

    case Builtin::BIfmin:
    case Builtin::BIfminf:
    case Builtin::BIfminl:
    case Builtin::BI__builtin_fmin:
    case Builtin::BI__builtin_fminf:
    case Builtin::BI__builtin_fminl:
      return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::minnum));

    // fmod() is a special-case. It maps to the frem instruction rather than an
    // LLVM intrinsic.
    case Builtin::BIfmod:
    case Builtin::BIfmodf:
    case Builtin::BIfmodl:
    case Builtin::BI__builtin_fmod:
    case Builtin::BI__builtin_fmodf:
    case Builtin::BI__builtin_fmodl: {
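      // E.g. fmod(5.5, 2.0) becomes 'frem double 5.5, 2.0' (== 1.5); frem
      // follows fmod semantics, so the result takes the dividend's sign.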
      Value *Arg1 = EmitScalarExpr(E->getArg(0));
      Value *Arg2 = EmitScalarExpr(E->getArg(1));
      return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
    }

    case Builtin::BIlog:
    case Builtin::BIlogf:
    case Builtin::BIlogl:
    case Builtin::BI__builtin_log:
    case Builtin::BI__builtin_logf:
    case Builtin::BI__builtin_logl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log));

    case Builtin::BIlog10:
    case Builtin::BIlog10f:
    case Builtin::BIlog10l:
    case Builtin::BI__builtin_log10:
    case Builtin::BI__builtin_log10f:
    case Builtin::BI__builtin_log10l:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log10));

    case Builtin::BIlog2:
    case Builtin::BIlog2f:
    case Builtin::BIlog2l:
    case Builtin::BI__builtin_log2:
    case Builtin::BI__builtin_log2f:
    case Builtin::BI__builtin_log2l:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::log2));

    case Builtin::BInearbyint:
    case Builtin::BInearbyintf:
    case Builtin::BInearbyintl:
    case Builtin::BI__builtin_nearbyint:
    case Builtin::BI__builtin_nearbyintf:
    case Builtin::BI__builtin_nearbyintl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::nearbyint));

    case Builtin::BIpow:
    case Builtin::BIpowf:
    case Builtin::BIpowl:
    case Builtin::BI__builtin_pow:
    case Builtin::BI__builtin_powf:
    case Builtin::BI__builtin_powl:
      return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::pow));

    case Builtin::BIrint:
    case Builtin::BIrintf:
    case Builtin::BIrintl:
    case Builtin::BI__builtin_rint:
    case Builtin::BI__builtin_rintf:
    case Builtin::BI__builtin_rintl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::rint));

    case Builtin::BIround:
    case Builtin::BIroundf:
    case Builtin::BIroundl:
    case Builtin::BI__builtin_round:
    case Builtin::BI__builtin_roundf:
    case Builtin::BI__builtin_roundl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::round));

    case Builtin::BIsin:
    case Builtin::BIsinf:
    case Builtin::BIsinl:
    case Builtin::BI__builtin_sin:
    case Builtin::BI__builtin_sinf:
    case Builtin::BI__builtin_sinl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sin));

    case Builtin::BIsqrt:
    case Builtin::BIsqrtf:
    case Builtin::BIsqrtl:
    case Builtin::BI__builtin_sqrt:
    case Builtin::BI__builtin_sqrtf:
    case Builtin::BI__builtin_sqrtl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::sqrt));

    case Builtin::BItrunc:
    case Builtin::BItruncf:
    case Builtin::BItruncl:
    case Builtin::BI__builtin_trunc:
    case Builtin::BI__builtin_truncf:
    case Builtin::BI__builtin_truncl:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::trunc));

    case Builtin::BIlround:
    case Builtin::BIlroundf:
    case Builtin::BIlroundl:
    case Builtin::BI__builtin_lround:
    case Builtin::BI__builtin_lroundf:
    case Builtin::BI__builtin_lroundl:
      return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lround));

    case Builtin::BIllround:
    case Builtin::BIllroundf:
    case Builtin::BIllroundl:
    case Builtin::BI__builtin_llround:
    case Builtin::BI__builtin_llroundf:
    case Builtin::BI__builtin_llroundl:
      return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llround));

    case Builtin::BIlrint:
    case Builtin::BIlrintf:
    case Builtin::BIlrintl:
    case Builtin::BI__builtin_lrint:
    case Builtin::BI__builtin_lrintf:
    case Builtin::BI__builtin_lrintl:
      return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::lrint));

    case Builtin::BIllrint:
    case Builtin::BIllrintf:
    case Builtin::BIllrintl:
    case Builtin::BI__builtin_llrint:
    case Builtin::BI__builtin_llrintf:
    case Builtin::BI__builtin_llrintl:
      return RValue::get(emitFPToIntRoundBuiltin(*this, E, Intrinsic::llrint));

    default:
      break;
    }
  }

  switch (BuiltinID) {
  default: break;
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__va_start:
  case Builtin::BI__builtin_va_end:
    return RValue::get(
        EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
                           ? EmitScalarExpr(E->getArg(0))
                           : EmitVAListRef(E->getArg(0)).getPointer(),
                       BuiltinID != Builtin::BI__builtin_va_end));
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
    Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();

    unsigned AS = CGM.getTargetCodeGenInfo().getDefaultAS();
    llvm::Type *Type = Int8Ty->getPointerTo(AS);

    DstPtr = Builder.CreatePointerBitCastOrAddrSpaceCast(DstPtr, Type);
    SrcPtr = Builder.CreatePointerBitCastOrAddrSpaceCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy,
                                                           { Type, Type }),
                                          {DstPtr, SrcPtr}));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    // X < 0 ? -X : X
    // The negation has 'nsw' because abs of INT_MIN is undefined.
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
    Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
    Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
    Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
        Imag->getType()->isFPOrFPVectorTy()
            ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
            : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_dump_struct: {
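    // Usage sketch: '__builtin_dump_struct(&obj, &printf)' prints each field
    // of *obj using the format table in dumpRecord() above and returns the
    // accumulated printf results.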
    llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
    unsigned GlobalAS = CGM.getDataLayout().getGlobalsAddressSpace();
    llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
        LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext(), GlobalAS)},
        true);

    Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
    CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();

    const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
    QualType Arg0Type = Arg0->getType()->getPointeeType();

    Value *RecordPtr = EmitScalarExpr(Arg0);
    Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
                            {LLVMFuncType, Func}, 0);
    return RValue::get(Res);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_clrsb:
  case Builtin::BI__builtin_clrsbl:
  case Builtin::BI__builtin_clrsbll: {
    // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
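    // (E.g. for a 32-bit int: clrsb(1) == ctlz(1) - 1 == 30 and
    // clrsb(-1) == ctlz(~-1) - 1 == ctlz(0) - 1 == 31.)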
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
    Value *Inverse = Builder.CreateNot(ArgValue, "not");
    Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
    Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
    Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
    Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                   "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
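    // (E.g. ffs(8) == 4: cttz(8) is 3, plus one for the 1-based bit index;
    // the select below maps ffs(0) to 0.)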
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp =
        Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
                          llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
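    // (E.g. parity(0b1011): ctpop gives 3 and 3 & 1 == 1, i.e. odd parity.)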
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__lzcnt16:
  case Builtin::BI__lzcnt:
  case Builtin::BI__lzcnt64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__popcnt16:
  case Builtin::BI__popcnt:
  case Builtin::BI__popcnt64:
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_unpredictable: {
    // Always return the argument of __builtin_unpredictable. LLVM does not
    // handle this builtin. Metadata for this builtin should be added directly
    // to instructions such as branches or switches that use it.
    return RValue::get(EmitScalarExpr(E->getArg(0)));
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
    // Don't generate llvm.expect on -O0 as the backend won't use it for
    // anything.
    // Note, we still IRGen ExpectedValue because it could have side-effects.
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      return RValue::get(ArgValue);

    Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *Result =
        Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_assume_aligned_cap:
  case Builtin::BI__builtin_assume_aligned: {
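    // Usage sketch: 'p = __builtin_assume_aligned(p, 64);' emits an alignment
    // assumption (via @llvm.assume) that p is 64-byte aligned; the optional
    // third argument expresses a known misalignment offset.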
    const Expr *Ptr = E->getArg(0);
    Value *PtrValue = EmitScalarExpr(Ptr);
    Value *OffsetValue =
        (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;

    Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
    ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
    unsigned Alignment = (unsigned)AlignmentCI->getZExtValue();

    EmitAlignmentAssumption(PtrValue, Ptr,
                            /*The expr loc is sufficient.*/ SourceLocation(),
                            Alignment, OffsetValue);
    return RValue::get(PtrValue);
  }
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume: {
    if (E->getArg(0)->HasSideEffects(getContext()))
      return RValue::get(nullptr);

    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
    return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
  }
  case Builtin::BI__builtin_bitreverse8:
  case Builtin::BI__builtin_bitreverse16:
  case Builtin::BI__builtin_bitreverse32:
  case Builtin::BI__builtin_bitreverse64: {
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
  }
  case Builtin::BI__builtin_rotateleft8:
  case Builtin::BI__builtin_rotateleft16:
  case Builtin::BI__builtin_rotateleft32:
  case Builtin::BI__builtin_rotateleft64:
  case Builtin::BI_rotl8: // Microsoft variants of rotate left
  case Builtin::BI_rotl16:
  case Builtin::BI_rotl:
  case Builtin::BI_lrotl:
  case Builtin::BI_rotl64:
    return emitRotate(E, false);

  case Builtin::BI__builtin_rotateright8:
  case Builtin::BI__builtin_rotateright16:
  case Builtin::BI__builtin_rotateright32:
  case Builtin::BI__builtin_rotateright64:
  case Builtin::BI_rotr8: // Microsoft variants of rotate right
  case Builtin::BI_rotr16:
  case Builtin::BI_rotr:
  case Builtin::BI_lrotr:
  case Builtin::BI_rotr64:
    return emitRotate(E, true);

  case Builtin::BI__builtin_constant_p: {
    llvm::Type *ResultType = ConvertType(E->getType());
    if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      // At -O0, we don't perform inlining, so we don't need to delay the
      // processing.
      return RValue::get(ConstantInt::get(ResultType, 0));

    const Expr *Arg = E->getArg(0);
    QualType ArgType = Arg->getType();
    // FIXME: The allowance for Obj-C pointers and block pointers is historical
    // and likely a mistake.
    if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
        !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
      // Per the GCC documentation, only numeric constants are recognized after
      // inlining.
      return RValue::get(ConstantInt::get(ResultType, 0));

    if (Arg->HasSideEffects(getContext()))
      // The argument is unevaluated, so be conservative if it might have
      // side-effects.
      return RValue::get(ConstantInt::get(ResultType, 0));

    Value *ArgValue = EmitScalarExpr(Arg);
    if (ArgType->isObjCObjectPointerType()) {
      // Convert Objective-C objects to id because we cannot distinguish between
      // LLVM types for Obj-C classes as they are opaque.
      ArgType = CGM.getContext().getObjCIdType();
      ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
    }
    Function *F =
        CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dynamic_object_size:
  case Builtin::BI__builtin_object_size: {
    unsigned Type =
        E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
    auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));

    // We pass this builtin onto the optimizer so that it can figure out the
    // object size in more complex cases.
    bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
    return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
                                             /*EmittedE=*/nullptr, IsDynamic));
  }
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
         llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
               llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
    // ASO OKAY: llvm.prefetch() needs an addr space 0 i8* even on CHERI
    // XXXAR: we might want to change that some time
    llvm::Type *AddressType = Builder.getInt8PtrTy(0);
    Address = Builder.CreatePointerBitCastOrAddrSpaceCast(Address, AddressType);
    return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
  }
  case Builtin::BI__builtin_readcyclecounter: {
    Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin___clear_cache: {
    Value *Begin = EmitScalarExpr(E->getArg(0));
    Value *End = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Builder.getInt8PtrTy(0);
    Begin = Builder.CreatePointerBitCastOrAddrSpaceCast(Begin, ArgType);
    End = Builder.CreatePointerBitCastOrAddrSpaceCast(End, ArgType);
    Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
    return RValue::get(Builder.CreateCall(F, {Begin, End}));
  }
  case Builtin::BI__builtin_trap:
    return RValue::get(EmitTrapCall(Intrinsic::trap));
  case Builtin::BI__debugbreak:
    return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
  case Builtin::BI__builtin_unreachable: {
    EmitUnreachable(E->getExprLoc());

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Function *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall(F, {Base, Exponent}));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
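    // (E.g. __builtin_isunordered(x, y) lowers to 'fcmp uno', true iff either
    // operand is NaN; the other five use ordered predicates, which are false
    // whenever an operand is NaN.)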
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BIfinite:
  case Builtin::BI__finite:
  case Builtin::BIfinitef:
  case Builtin::BI__finitef:
  case Builtin::BIfinitel:
  case Builtin::BI__finitel:
  case Builtin::BI__builtin_isinf:
  case Builtin::BI__builtin_isfinite: {
    // isinf(x) --> fabs(x) == infinity
    // isfinite(x) --> fabs(x) != infinity
    // x != NaN via the ordered compare in either case.
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Fabs = EmitFAbs(*this, V);
    Constant *Infinity = ConstantFP::getInfinity(V->getType());
    CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
                                  ? CmpInst::FCMP_OEQ
                                  : CmpInst::FCMP_ONE;
    Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
    return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isinf_sign: {
    // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
    Value *Arg = EmitScalarExpr(E->getArg(0));
    Value *AbsArg = EmitFAbs(*this, Arg);
    Value *IsInf = Builder.CreateFCmpOEQ(
        AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
    Value *IsNeg = EmitSignBit(*this, Arg);

    llvm::Type *IntTy = ConvertType(E->getType());
    Value *Zero = Constant::getNullValue(IntTy);
    Value *One = ConstantInt::get(IntTy, 1);
    Value *NegativeOne = ConstantInt::get(IntTy, -1);
    Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
    Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V);
    Value *IsLessThanInf =
        Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_flt_rounds: {
    Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
        Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                          "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V);
    Value *IsInf =
        Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    Value *NormalResult =
        Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                             EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    const TargetInfo &TI = getContext().getTargetInfo();
    // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
    unsigned SuitableAlignmentInBytes =
        CGM.getContext()
            .toCharUnitsFromBits(TI.getSuitableAlign())
            .getQuantity();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(SuitableAlignmentInBytes);
    initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
    return RValue::get(AI);
  }

  case Builtin::BI__builtin_alloca_with_align: {
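    // Note the alignment argument is given in bits: e.g.
    // __builtin_alloca_with_align(n, 256) produces a 32-byte-aligned alloca.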
    Value *Size = EmitScalarExpr(E->getArg(0));
    Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
    auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
    unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
    unsigned AlignmentInBytes =
        CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(AlignmentInBytes);
    initializeAlloca(*this, AI, Size, AlignmentInBytes);
    return RValue::get(AI);
  }

  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
    return RValue::get(nullptr);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 1);
    auto CI = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    diagnoseMisalignedCapabiliyCopyDest(*this, "memcpy", E->getArg(1), CI);
    return RValue::get(Dest.getPointer(), Dest.getAlignment().getQuantity());
  }

  case Builtin::BI__builtin_char_memchr:
    BuiltinID = Builtin::BI__builtin_memchr;
    break;

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) {
      diagnoseMisalignedCapabiliyCopyDest(*this, "__memcpy_chk", E->getArg(1),
                                          E->getArg(0));
      break;
    }
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize)) {
      diagnoseMisalignedCapabiliyCopyDest(*this, "__memcpy_chk", E->getArg(1),
                                          E->getArg(0));
      break;
    }
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    auto CI = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    diagnoseMisalignedCapabiliyCopyDest(*this, "memcpy", E->getArg(1), CI);
    return RValue::get(Dest.getPointer(), Dest.getAlignment().getQuantity());
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
    Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  DestAddr, SrcAddr, SizeVal);
    return RValue::get(DestAddr.getPointer(),
                       DestAddr.getAlignment().getQuantity());
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext())) {
      diagnoseMisalignedCapabiliyCopyDest(*this, "__memmove_chk", E->getArg(1),
                                          E->getArg(0));
      break;
    }
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize)) {
      diagnoseMisalignedCapabiliyCopyDest(*this, "__memmove_chk", E->getArg(1),
                                          E->getArg(0));
      break;
    }
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    auto CI = Builder.CreateMemMove(Dest, Src, SizeVal, false);
    diagnoseMisalignedCapabiliyCopyDest(*this, "memmove", E->getArg(1), CI);
    return RValue::get(Dest.getPointer(), Dest.getAlignment().getQuantity());
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Address Src = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
                        E->getArg(1)->getExprLoc(), FD, 1);
    auto CI = Builder.CreateMemMove(Dest, Src, SizeVal, false);
    diagnoseMisalignedCapabiliyCopyDest(*this, "memmove", E->getArg(1), CI);
    return RValue::get(Dest.getPointer(), Dest.getAlignment().getQuantity());
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer(), Dest.getAlignment().getQuantity());
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    return RValue::get(Dest.getPointer(), Dest.getAlignment().getQuantity());
  }
  case Builtin::BI__builtin_wmemcmp: {
    // The MSVC runtime library does not provide a definition of wmemcmp, so we
    // need an inline implementation.
    if (!getTarget().getTriple().isOSMSVCRT())
      break;

    llvm::Type *WCharTy = ConvertType(getContext().WCharTy);

    Value *Dst = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));

    BasicBlock *Entry = Builder.GetInsertBlock();
    BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
    BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
    BasicBlock *Next = createBasicBlock("wmemcmp.next");
    BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
    Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(SizeEq0, Exit, CmpGT);

    EmitBlock(CmpGT);
    PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
    DstPhi->addIncoming(Dst, Entry);
    PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
    SrcPhi->addIncoming(Src, Entry);
    PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
    SizePhi->addIncoming(Size, Entry);
    CharUnits WCharAlign =
        getContext().getTypeAlignInChars(getContext().WCharTy);
    Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
    Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
    Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
    Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);

    EmitBlock(CmpLT);
    Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
    Builder.CreateCondBr(DstLtSrc, Exit, Next);

    EmitBlock(Next);
    Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
    Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
    Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
    Value *NextSizeEq0 =
        Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
    DstPhi->addIncoming(NextDst, Next);
    SrcPhi->addIncoming(NextSrc, Next);
    SizePhi->addIncoming(NextSize, Next);

    EmitBlock(Exit);
    PHINode *Ret = Builder.CreatePHI(IntTy, 4);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
    Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
    Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
    return RValue::get(Ret);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    Value *V = Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, Offset));
    unsigned AS = CGM.getTargetCodeGenInfo().getDefaultAS();
    if (AS != 0)
      V = Builder.CreateAddrSpaceCast(V, Int8Ty->getPointerTo(AS));
    return RValue::get(V);
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(
        E->getArg(0), getContext().UnsignedIntTy);
    Function *F =
        CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramInt8PtrTy});
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI_ReturnAddress: {
    Function *F =
        CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramInt8PtrTy});
    return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(
        E->getArg(0), getContext().UnsignedIntTy);
    Function *F =
        CGM.getIntrinsic(Intrinsic::frameaddress, {CGM.AllocaInt8PtrTy});
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
        = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
2713 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
2714 Value *Address = EmitScalarExpr(E->getArg(0));
2715 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
2716 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
2717 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
2718 }
2719 case Builtin::BI__builtin_eh_return: {
2720 Value *Int = EmitScalarExpr(E->getArg(0));
2721 Value *Ptr = EmitScalarExpr(E->getArg(1));
2722 Ptr = Builder.CreatePointerBitCastOrAddrSpaceCast(Ptr,
2723 Int8Ty->getPointerTo(0));
2724
2725 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
2726 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
2727 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
2728 Function *F =
2729 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
2730 : Intrinsic::eh_return_i64);
2731 Builder.CreateCall(F, {Int, Ptr});
2732 Builder.CreateUnreachable();
2733
2734 // We do need to preserve an insertion point.
2735 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
2736
2737 return RValue::get(nullptr);
2738 }
2739 case Builtin::BI__builtin_unwind_init: {
2740 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
2741 return RValue::get(Builder.CreateCall(F));
2742 }
2743 case Builtin::BI__builtin_extend_pointer: {
2744 // Extends a pointer to the size of an _Unwind_Word, which is
2745 // uint64_t on all platforms. Generally this gets poked into a
2746 // register and eventually used as an address, so if the
2747 // addressing registers are wider than pointers and the platform
2748 // doesn't implicitly ignore high-order bits when doing
2749 // addressing, we need to make sure we zext / sext based on
2750 // the platform's expectations.
2751 //
2752 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
2753
2754 // Cast the pointer to intptr_t.
2755 Value *Ptr = EmitScalarExpr(E->getArg(0));
2756 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
2757
2758 // If that's 64 bits, we're done.
2759 if (IntPtrTy->getBitWidth() == 64)
2760 return RValue::get(Result);
2761
    // Otherwise, ask the target hooks what to do.
2763 if (getTargetHooks().extendPointerWithSExt())
2764 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
2765 else
2766 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
2767 }
2768 case Builtin::BI__builtin_setjmp: {
2769 // Buffer is a void**.
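    // A sketch of the buffer layout assumed here (following the
    // llvm.eh.sjlj.setjmp convention): slot 0 holds the frame pointer,
    // slot 1 is reserved for the resume address filled in by the intrinsic,
    // and slot 2 holds the saved stack pointer.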
2770 Address Buf = EmitPointerWithAlignment(E->getArg(0));
2771
2772 // Store the frame pointer to the setjmp buffer.
2773 Value *FrameAddr = Builder.CreateCall(
2774 CGM.getIntrinsic(Intrinsic::frameaddress, {CGM.AllocaInt8PtrTy}),
2775 ConstantInt::get(Int32Ty, 0));
2776 Builder.CreateStore(FrameAddr, Buf);
2777
2778 // Store the stack pointer to the setjmp buffer.
2779 Value *StackAddr =
2780 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave,
2781 { CGM.Int8PtrTy }));
2782 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
2783 Builder.CreateStore(StackAddr, StackSaveSlot);
2784
2785 // Call LLVM's EH setjmp, which is lightweight.
2786 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
2787 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2788 return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
2789 }
2790 case Builtin::BI__builtin_longjmp: {
2791 Value *Buf = EmitScalarExpr(E->getArg(0));
2792 Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
2793
2794 // Call LLVM's EH longjmp, which is lightweight.
2795 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
2796
2797 // longjmp doesn't return; mark this as unreachable.
2798 Builder.CreateUnreachable();
2799
2800 // We do need to preserve an insertion point.
2801 EmitBlock(createBasicBlock("longjmp.cont"));
2802
2803 return RValue::get(nullptr);
2804 }
2805 case Builtin::BI__builtin_launder: {
2806 const Expr *Arg = E->getArg(0);
2807 QualType ArgTy = Arg->getType()->getPointeeType();
2808 Value *Ptr = EmitScalarExpr(Arg);
2809 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
2810 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
2811
2812 return RValue::get(Ptr);
2813 }
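
  // The __sync_* builtins below lower to atomicrmw instructions. A rough
  // sketch (assuming a 32-bit operand):
  //   __sync_fetch_and_add(p, v) -> %old = atomicrmw add i32* %p, i32 %v seq_cst
  // The *_and_fetch forms additionally re-apply the operation to the
  // returned old value to produce the new value.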
2814 case Builtin::BI__sync_fetch_and_add:
2815 case Builtin::BI__sync_fetch_and_sub:
2816 case Builtin::BI__sync_fetch_and_or:
2817 case Builtin::BI__sync_fetch_and_and:
2818 case Builtin::BI__sync_fetch_and_xor:
2819 case Builtin::BI__sync_fetch_and_nand:
2820 case Builtin::BI__sync_add_and_fetch:
2821 case Builtin::BI__sync_sub_and_fetch:
2822 case Builtin::BI__sync_and_and_fetch:
2823 case Builtin::BI__sync_or_and_fetch:
2824 case Builtin::BI__sync_xor_and_fetch:
2825 case Builtin::BI__sync_nand_and_fetch:
2826 case Builtin::BI__sync_val_compare_and_swap:
2827 case Builtin::BI__sync_bool_compare_and_swap:
2828 case Builtin::BI__sync_lock_test_and_set:
2829 case Builtin::BI__sync_lock_release:
2830 case Builtin::BI__sync_swap:
2831 llvm_unreachable("Shouldn't make it through sema");
2832 case Builtin::BI__sync_fetch_and_add_1:
2833 case Builtin::BI__sync_fetch_and_add_2:
2834 case Builtin::BI__sync_fetch_and_add_4:
2835 case Builtin::BI__sync_fetch_and_add_8:
2836 case Builtin::BI__sync_fetch_and_add_16:
2837 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
2838 case Builtin::BI__sync_fetch_and_sub_1:
2839 case Builtin::BI__sync_fetch_and_sub_2:
2840 case Builtin::BI__sync_fetch_and_sub_4:
2841 case Builtin::BI__sync_fetch_and_sub_8:
2842 case Builtin::BI__sync_fetch_and_sub_16:
2843 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
2844 case Builtin::BI__sync_fetch_and_or_1:
2845 case Builtin::BI__sync_fetch_and_or_2:
2846 case Builtin::BI__sync_fetch_and_or_4:
2847 case Builtin::BI__sync_fetch_and_or_8:
2848 case Builtin::BI__sync_fetch_and_or_16:
2849 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
2850 case Builtin::BI__sync_fetch_and_and_1:
2851 case Builtin::BI__sync_fetch_and_and_2:
2852 case Builtin::BI__sync_fetch_and_and_4:
2853 case Builtin::BI__sync_fetch_and_and_8:
2854 case Builtin::BI__sync_fetch_and_and_16:
2855 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
2856 case Builtin::BI__sync_fetch_and_xor_1:
2857 case Builtin::BI__sync_fetch_and_xor_2:
2858 case Builtin::BI__sync_fetch_and_xor_4:
2859 case Builtin::BI__sync_fetch_and_xor_8:
2860 case Builtin::BI__sync_fetch_and_xor_16:
2861 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
2862 case Builtin::BI__sync_fetch_and_nand_1:
2863 case Builtin::BI__sync_fetch_and_nand_2:
2864 case Builtin::BI__sync_fetch_and_nand_4:
2865 case Builtin::BI__sync_fetch_and_nand_8:
2866 case Builtin::BI__sync_fetch_and_nand_16:
2867 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
2868
2869 // Clang extensions: not overloaded yet.
2870 case Builtin::BI__sync_fetch_and_min:
2871 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
2872 case Builtin::BI__sync_fetch_and_max:
2873 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
2874 case Builtin::BI__sync_fetch_and_umin:
2875 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
2876 case Builtin::BI__sync_fetch_and_umax:
2877 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
2878
2879 case Builtin::BI__sync_add_and_fetch_1:
2880 case Builtin::BI__sync_add_and_fetch_2:
2881 case Builtin::BI__sync_add_and_fetch_4:
2882 case Builtin::BI__sync_add_and_fetch_8:
2883 case Builtin::BI__sync_add_and_fetch_16:
2884 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
2885 llvm::Instruction::Add);
2886 case Builtin::BI__sync_sub_and_fetch_1:
2887 case Builtin::BI__sync_sub_and_fetch_2:
2888 case Builtin::BI__sync_sub_and_fetch_4:
2889 case Builtin::BI__sync_sub_and_fetch_8:
2890 case Builtin::BI__sync_sub_and_fetch_16:
2891 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
2892 llvm::Instruction::Sub);
2893 case Builtin::BI__sync_and_and_fetch_1:
2894 case Builtin::BI__sync_and_and_fetch_2:
2895 case Builtin::BI__sync_and_and_fetch_4:
2896 case Builtin::BI__sync_and_and_fetch_8:
2897 case Builtin::BI__sync_and_and_fetch_16:
2898 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
2899 llvm::Instruction::And);
2900 case Builtin::BI__sync_or_and_fetch_1:
2901 case Builtin::BI__sync_or_and_fetch_2:
2902 case Builtin::BI__sync_or_and_fetch_4:
2903 case Builtin::BI__sync_or_and_fetch_8:
2904 case Builtin::BI__sync_or_and_fetch_16:
2905 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
2906 llvm::Instruction::Or);
2907 case Builtin::BI__sync_xor_and_fetch_1:
2908 case Builtin::BI__sync_xor_and_fetch_2:
2909 case Builtin::BI__sync_xor_and_fetch_4:
2910 case Builtin::BI__sync_xor_and_fetch_8:
2911 case Builtin::BI__sync_xor_and_fetch_16:
2912 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
2913 llvm::Instruction::Xor);
2914 case Builtin::BI__sync_nand_and_fetch_1:
2915 case Builtin::BI__sync_nand_and_fetch_2:
2916 case Builtin::BI__sync_nand_and_fetch_4:
2917 case Builtin::BI__sync_nand_and_fetch_8:
2918 case Builtin::BI__sync_nand_and_fetch_16:
2919 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
2920 llvm::Instruction::And, true);
2921
2922 case Builtin::BI__sync_val_compare_and_swap_1:
2923 case Builtin::BI__sync_val_compare_and_swap_2:
2924 case Builtin::BI__sync_val_compare_and_swap_4:
2925 case Builtin::BI__sync_val_compare_and_swap_8:
2926 case Builtin::BI__sync_val_compare_and_swap_16:
2927 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
2928
2929 case Builtin::BI__sync_bool_compare_and_swap_1:
2930 case Builtin::BI__sync_bool_compare_and_swap_2:
2931 case Builtin::BI__sync_bool_compare_and_swap_4:
2932 case Builtin::BI__sync_bool_compare_and_swap_8:
2933 case Builtin::BI__sync_bool_compare_and_swap_16:
2934 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
2935
2936 case Builtin::BI__sync_swap_1:
2937 case Builtin::BI__sync_swap_2:
2938 case Builtin::BI__sync_swap_4:
2939 case Builtin::BI__sync_swap_8:
2940 case Builtin::BI__sync_swap_16:
2941 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
2942
2943 case Builtin::BI__sync_lock_test_and_set_1:
2944 case Builtin::BI__sync_lock_test_and_set_2:
2945 case Builtin::BI__sync_lock_test_and_set_4:
2946 case Builtin::BI__sync_lock_test_and_set_8:
2947 case Builtin::BI__sync_lock_test_and_set_16:
2948 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
2949
2950 case Builtin::BI__sync_lock_release_1:
2951 case Builtin::BI__sync_lock_release_2:
2952 case Builtin::BI__sync_lock_release_4:
2953 case Builtin::BI__sync_lock_release_8:
2954 case Builtin::BI__sync_lock_release_16: {
2955 Value *Ptr = EmitScalarExpr(E->getArg(0));
2956 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
2957 CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
2958 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
2959 StoreSize.getQuantity() * 8);
2960 Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo(DefaultAS));
2961 llvm::StoreInst *Store =
2962 Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
2963 StoreSize);
2964 Store->setAtomic(llvm::AtomicOrdering::Release);
2965 return RValue::get(nullptr);
2966 }
2967
2968 case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++11-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is poorly specified: in theory there is no way to use it safely, but
    // in practice it mostly works when combined with non-atomic loads and
    // stores to get acquire/release semantics.
2976 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
2977 return RValue::get(nullptr);
2978 }
2979
2980 case Builtin::BI__builtin_nontemporal_load:
2981 return RValue::get(EmitNontemporalLoad(*this, E));
2982 case Builtin::BI__builtin_nontemporal_store:
2983 return RValue::get(EmitNontemporalStore(*this, E, DefaultAS));
2984 case Builtin::BI__c11_atomic_is_lock_free:
2985 case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly aligned.
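    //
    // A rough sketch (the integer types depend on the target's size_t):
    //   __c11_atomic_is_lock_free(4) becomes
    //   %r = call zeroext i1 @__atomic_is_lock_free(i64 4, i8* null)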
2989 const char *LibCallName = "__atomic_is_lock_free";
2990 CallArgList Args;
2991 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
2992 getContext().getSizeType());
2993 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
2994 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
2995 getContext().VoidPtrTy);
2996 else
2997 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
2998 getContext().VoidPtrTy);
2999 const CGFunctionInfo &FuncInfo =
3000 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
3001 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
3002 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
3003 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
3004 ReturnValueSlot(), Args);
3005 }
3006
3007 case Builtin::BI__atomic_test_and_set: {
3008 // Look at the argument type to determine whether this is a volatile
3009 // operation. The parameter type is always volatile.
3010 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3011 bool Volatile =
3012 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3013
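    // With a constant memory order this lowers to a single atomicrmw; a
    // rough sketch for __atomic_test_and_set(p, __ATOMIC_SEQ_CST):
    //   %old    = atomicrmw xchg i8* %p, i8 1 seq_cst
    //   %tobool = icmp ne i8 %old, 0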
3014 Value *Ptr = EmitScalarExpr(E->getArg(0));
3015 unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
3016 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3017 Value *NewVal = Builder.getInt8(1);
3018 Value *Order = EmitScalarExpr(E->getArg(1));
3019 if (isa<llvm::ConstantInt>(Order)) {
3020 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3021 AtomicRMWInst *Result = nullptr;
3022 switch (ord) {
3023 case 0: // memory_order_relaxed
3024 default: // invalid order
3025 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3026 llvm::AtomicOrdering::Monotonic);
3027 break;
3028 case 1: // memory_order_consume
3029 case 2: // memory_order_acquire
3030 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3031 llvm::AtomicOrdering::Acquire);
3032 break;
3033 case 3: // memory_order_release
3034 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3035 llvm::AtomicOrdering::Release);
3036 break;
    case 4: // memory_order_acq_rel
3039 Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3040 llvm::AtomicOrdering::AcquireRelease);
3041 break;
3042 case 5: // memory_order_seq_cst
3043 Result = Builder.CreateAtomicRMW(
3044 llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
3045 llvm::AtomicOrdering::SequentiallyConsistent);
3046 break;
3047 }
3048 Result->setVolatile(Volatile);
3049 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3050 }
3051
3052 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3053
3054 llvm::BasicBlock *BBs[5] = {
3055 createBasicBlock("monotonic", CurFn),
3056 createBasicBlock("acquire", CurFn),
3057 createBasicBlock("release", CurFn),
3058 createBasicBlock("acqrel", CurFn),
3059 createBasicBlock("seqcst", CurFn)
3060 };
3061 llvm::AtomicOrdering Orders[5] = {
3062 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
3063 llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
3064 llvm::AtomicOrdering::SequentiallyConsistent};
3065
3066 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3067 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3068
3069 Builder.SetInsertPoint(ContBB);
3070 PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
3071
3072 for (unsigned i = 0; i < 5; ++i) {
3073 Builder.SetInsertPoint(BBs[i]);
3074 AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
3075 Ptr, NewVal, Orders[i]);
3076 RMW->setVolatile(Volatile);
3077 Result->addIncoming(RMW, BBs[i]);
3078 Builder.CreateBr(ContBB);
3079 }
3080
3081 SI->addCase(Builder.getInt32(0), BBs[0]);
3082 SI->addCase(Builder.getInt32(1), BBs[1]);
3083 SI->addCase(Builder.getInt32(2), BBs[1]);
3084 SI->addCase(Builder.getInt32(3), BBs[2]);
3085 SI->addCase(Builder.getInt32(4), BBs[3]);
3086 SI->addCase(Builder.getInt32(5), BBs[4]);
3087
3088 Builder.SetInsertPoint(ContBB);
3089 return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
3090 }
3091
3092 case Builtin::BI__atomic_clear: {
3093 QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
3094 bool Volatile =
3095 PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
3096
3097 Address Ptr = EmitPointerWithAlignment(E->getArg(0));
3098 unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
3099 Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
3100 Value *NewVal = Builder.getInt8(0);
3101 Value *Order = EmitScalarExpr(E->getArg(1));
3102 if (isa<llvm::ConstantInt>(Order)) {
3103 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3104 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3105 switch (ord) {
3106 case 0: // memory_order_relaxed
3107 default: // invalid order
3108 Store->setOrdering(llvm::AtomicOrdering::Monotonic);
3109 break;
3110 case 3: // memory_order_release
3111 Store->setOrdering(llvm::AtomicOrdering::Release);
3112 break;
3113 case 5: // memory_order_seq_cst
3114 Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
3115 break;
3116 }
3117 return RValue::get(nullptr);
3118 }
3119
3120 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3121
3122 llvm::BasicBlock *BBs[3] = {
3123 createBasicBlock("monotonic", CurFn),
3124 createBasicBlock("release", CurFn),
3125 createBasicBlock("seqcst", CurFn)
3126 };
3127 llvm::AtomicOrdering Orders[3] = {
3128 llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
3129 llvm::AtomicOrdering::SequentiallyConsistent};
3130
3131 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3132 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
3133
3134 for (unsigned i = 0; i < 3; ++i) {
3135 Builder.SetInsertPoint(BBs[i]);
3136 StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
3137 Store->setOrdering(Orders[i]);
3138 Builder.CreateBr(ContBB);
3139 }
3140
3141 SI->addCase(Builder.getInt32(0), BBs[0]);
3142 SI->addCase(Builder.getInt32(3), BBs[1]);
3143 SI->addCase(Builder.getInt32(5), BBs[2]);
3144
3145 Builder.SetInsertPoint(ContBB);
3146 return RValue::get(nullptr);
3147 }
3148
3149 case Builtin::BI__atomic_thread_fence:
3150 case Builtin::BI__atomic_signal_fence:
3151 case Builtin::BI__c11_atomic_thread_fence:
3152 case Builtin::BI__c11_atomic_signal_fence: {
3153 llvm::SyncScope::ID SSID;
3154 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
3155 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
3156 SSID = llvm::SyncScope::SingleThread;
3157 else
3158 SSID = llvm::SyncScope::System;
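    // A rough sketch of the mapping for constant orders:
    //   __atomic_thread_fence(__ATOMIC_ACQUIRE) -> fence acquire
    //   __atomic_signal_fence(__ATOMIC_SEQ_CST) ->
    //       fence syncscope("singlethread") seq_cst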
3159 Value *Order = EmitScalarExpr(E->getArg(0));
3160 if (isa<llvm::ConstantInt>(Order)) {
3161 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
3162 switch (ord) {
3163 case 0: // memory_order_relaxed
3164 default: // invalid order
3165 break;
3166 case 1: // memory_order_consume
3167 case 2: // memory_order_acquire
3168 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3169 break;
3170 case 3: // memory_order_release
3171 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3172 break;
3173 case 4: // memory_order_acq_rel
3174 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3175 break;
3176 case 5: // memory_order_seq_cst
3177 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3178 break;
3179 }
3180 return RValue::get(nullptr);
3181 }
3182
3183 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
3184 AcquireBB = createBasicBlock("acquire", CurFn);
3185 ReleaseBB = createBasicBlock("release", CurFn);
3186 AcqRelBB = createBasicBlock("acqrel", CurFn);
3187 SeqCstBB = createBasicBlock("seqcst", CurFn);
3188 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
3189
3190 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
3191 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
3192
3193 Builder.SetInsertPoint(AcquireBB);
3194 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
3195 Builder.CreateBr(ContBB);
3196 SI->addCase(Builder.getInt32(1), AcquireBB);
3197 SI->addCase(Builder.getInt32(2), AcquireBB);
3198
3199 Builder.SetInsertPoint(ReleaseBB);
3200 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
3201 Builder.CreateBr(ContBB);
3202 SI->addCase(Builder.getInt32(3), ReleaseBB);
3203
3204 Builder.SetInsertPoint(AcqRelBB);
3205 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
3206 Builder.CreateBr(ContBB);
3207 SI->addCase(Builder.getInt32(4), AcqRelBB);
3208
3209 Builder.SetInsertPoint(SeqCstBB);
3210 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
3211 Builder.CreateBr(ContBB);
3212 SI->addCase(Builder.getInt32(5), SeqCstBB);
3213
3214 Builder.SetInsertPoint(ContBB);
3215 return RValue::get(nullptr);
3216 }
3217
3218 case Builtin::BI__builtin_signbit:
3219 case Builtin::BI__builtin_signbitf:
3220 case Builtin::BI__builtin_signbitl: {
3221 return RValue::get(
3222 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
3223 ConvertType(E->getType())));
3224 }
3225 case Builtin::BI__annotation: {
3226 // Re-encode each wide string to UTF8 and make an MDString.
3227 SmallVector<Metadata *, 1> Strings;
3228 for (const Expr *Arg : E->arguments()) {
3229 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
3230 assert(Str->getCharByteWidth() == 2);
3231 StringRef WideBytes = Str->getBytes();
3232 std::string StrUtf8;
3233 if (!convertUTF16ToUTF8String(
3234 makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
3235 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
3236 continue;
3237 }
3238 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
3239 }
3240
    // Build an MDTuple of MDStrings and emit the intrinsic call.
3242 llvm::Function *F =
3243 CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
3244 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
3245 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
3246 return RValue::getIgnored();
3247 }
3248 case Builtin::BI__builtin_annotation: {
3249 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
3250 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
3251 {AnnVal->getType(), CGM.Int8PtrTy});
3252
    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
3255 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
3256 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
3257 return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
3258 }
3259 case Builtin::BI__builtin_addcb:
3260 case Builtin::BI__builtin_addcs:
3261 case Builtin::BI__builtin_addc:
3262 case Builtin::BI__builtin_addcl:
3263 case Builtin::BI__builtin_addcll:
3264 case Builtin::BI__builtin_subcb:
3265 case Builtin::BI__builtin_subcs:
3266 case Builtin::BI__builtin_subc:
3267 case Builtin::BI__builtin_subcl:
3268 case Builtin::BI__builtin_subcll: {
3269
3270 // We translate all of these builtins from expressions of the form:
3271 // int x = ..., y = ..., carryin = ..., carryout, result;
3272 // result = __builtin_addc(x, y, carryin, &carryout);
3273 //
3274 // to LLVM IR of the form:
3275 //
3276 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
3277 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
3278 // %carry1 = extractvalue {i32, i1} %tmp1, 1
3279 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
3280 // i32 %carryin)
3281 // %result = extractvalue {i32, i1} %tmp2, 0
3282 // %carry2 = extractvalue {i32, i1} %tmp2, 1
3283 // %tmp3 = or i1 %carry1, %carry2
3284 // %tmp4 = zext i1 %tmp3 to i32
3285 // store i32 %tmp4, i32* %carryout
3286
3287 // Scalarize our inputs.
3288 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3289 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3290 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
3291 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
3292
3293 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
3294 llvm::Intrinsic::ID IntrinsicId;
3295 switch (BuiltinID) {
3296 default: llvm_unreachable("Unknown multiprecision builtin id.");
3297 case Builtin::BI__builtin_addcb:
3298 case Builtin::BI__builtin_addcs:
3299 case Builtin::BI__builtin_addc:
3300 case Builtin::BI__builtin_addcl:
3301 case Builtin::BI__builtin_addcll:
3302 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3303 break;
3304 case Builtin::BI__builtin_subcb:
3305 case Builtin::BI__builtin_subcs:
3306 case Builtin::BI__builtin_subc:
3307 case Builtin::BI__builtin_subcl:
3308 case Builtin::BI__builtin_subcll:
3309 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3310 break;
3311 }
3312
3313 // Construct our resulting LLVM IR expression.
3314 llvm::Value *Carry1;
3315 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
3316 X, Y, Carry1);
3317 llvm::Value *Carry2;
3318 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
3319 Sum1, Carryin, Carry2);
3320 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
3321 X->getType());
3322 Builder.CreateStore(CarryOut, CarryOutPtr);
3323 return RValue::get(Sum2);
3324 }
3325
3326 case Builtin::BI__builtin_add_overflow:
3327 case Builtin::BI__builtin_sub_overflow:
3328 case Builtin::BI__builtin_mul_overflow: {
3329 const clang::Expr *LeftArg = E->getArg(0);
3330 const clang::Expr *RightArg = E->getArg(1);
3331 const clang::Expr *ResultArg = E->getArg(2);
3332
3333 clang::QualType ResultQTy =
3334 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
3335
3336 WidthAndSignedness LeftInfo =
3337 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
3338 WidthAndSignedness RightInfo =
3339 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
3340 WidthAndSignedness ResultInfo =
3341 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
3342
3343 // Handle mixed-sign multiplication as a special case, because adding
3344 // runtime or backend support for our generic irgen would be too expensive.
3345 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
3346 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
3347 RightInfo, ResultArg, ResultQTy,
3348 ResultInfo);
3349
3350 WidthAndSignedness EncompassingInfo =
3351 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
3352
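    // For example (illustrative): adding two 'unsigned int' operands into a
    // 'signed char' result requires a signed encompassing type of 33 bits,
    // so the arithmetic below is performed as @llvm.sadd.with.overflow.i33
    // and the result is truncated, with an extra truncation-overflow check.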
3353 llvm::Type *EncompassingLLVMTy =
3354 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
3355
3356 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
3357
3358 llvm::Intrinsic::ID IntrinsicId;
3359 switch (BuiltinID) {
3360 default:
3361 llvm_unreachable("Unknown overflow builtin id.");
3362 case Builtin::BI__builtin_add_overflow:
3363 IntrinsicId = EncompassingInfo.Signed
3364 ? llvm::Intrinsic::sadd_with_overflow
3365 : llvm::Intrinsic::uadd_with_overflow;
3366 break;
3367 case Builtin::BI__builtin_sub_overflow:
3368 IntrinsicId = EncompassingInfo.Signed
3369 ? llvm::Intrinsic::ssub_with_overflow
3370 : llvm::Intrinsic::usub_with_overflow;
3371 break;
3372 case Builtin::BI__builtin_mul_overflow:
3373 IntrinsicId = EncompassingInfo.Signed
3374 ? llvm::Intrinsic::smul_with_overflow
3375 : llvm::Intrinsic::umul_with_overflow;
3376 break;
3377 }
3378
3379 llvm::Value *Left = EmitScalarExpr(LeftArg);
3380 llvm::Value *Right = EmitScalarExpr(RightArg);
3381 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
3382
3383 // Extend each operand to the encompassing type.
3384 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
3385 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
3386
3387 // Perform the operation on the extended values.
3388 llvm::Value *Overflow, *Result;
3389 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
3390
3391 if (EncompassingInfo.Width > ResultInfo.Width) {
3392 // The encompassing type is wider than the result type, so we need to
3393 // truncate it.
3394 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
3395
3396 // To see if the truncation caused an overflow, we will extend
3397 // the result and then compare it to the original result.
3398 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
3399 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
3400 llvm::Value *TruncationOverflow =
3401 Builder.CreateICmpNE(Result, ResultTruncExt);
3402
3403 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
3404 Result = ResultTrunc;
3405 }
3406
3407 // Finally, store the result using the pointer.
3408 bool isVolatile =
3409 ResultArg->getType()->getPointeeType().isVolatileQualified();
3410 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
3411
3412 return RValue::get(Overflow);
3413 }
3414
3415 case Builtin::BI__builtin_uadd_overflow:
3416 case Builtin::BI__builtin_uaddl_overflow:
3417 case Builtin::BI__builtin_uaddll_overflow:
3418 case Builtin::BI__builtin_usub_overflow:
3419 case Builtin::BI__builtin_usubl_overflow:
3420 case Builtin::BI__builtin_usubll_overflow:
3421 case Builtin::BI__builtin_umul_overflow:
3422 case Builtin::BI__builtin_umull_overflow:
3423 case Builtin::BI__builtin_umulll_overflow:
3424 case Builtin::BI__builtin_sadd_overflow:
3425 case Builtin::BI__builtin_saddl_overflow:
3426 case Builtin::BI__builtin_saddll_overflow:
3427 case Builtin::BI__builtin_ssub_overflow:
3428 case Builtin::BI__builtin_ssubl_overflow:
3429 case Builtin::BI__builtin_ssubll_overflow:
3430 case Builtin::BI__builtin_smul_overflow:
3431 case Builtin::BI__builtin_smull_overflow:
3432 case Builtin::BI__builtin_smulll_overflow: {
3433
  // We translate all of these builtins directly to the relevant LLVM IR node.
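  //
  // A rough sketch for __builtin_sadd_overflow(x, y, &r) with 32-bit int:
  //   %pair  = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
  //   %sum   = extractvalue {i32, i1} %pair, 0   ; stored through &r
  //   %carry = extractvalue {i32, i1} %pair, 1   ; returned as the result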
3435
3436 // Scalarize our inputs.
3437 llvm::Value *X = EmitScalarExpr(E->getArg(0));
3438 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
3439 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
3440
3441 // Decide which of the overflow intrinsics we are lowering to:
3442 llvm::Intrinsic::ID IntrinsicId;
3443 switch (BuiltinID) {
3444 default: llvm_unreachable("Unknown overflow builtin id.");
3445 case Builtin::BI__builtin_uadd_overflow:
3446 case Builtin::BI__builtin_uaddl_overflow:
3447 case Builtin::BI__builtin_uaddll_overflow:
3448 IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
3449 break;
3450 case Builtin::BI__builtin_usub_overflow:
3451 case Builtin::BI__builtin_usubl_overflow:
3452 case Builtin::BI__builtin_usubll_overflow:
3453 IntrinsicId = llvm::Intrinsic::usub_with_overflow;
3454 break;
3455 case Builtin::BI__builtin_umul_overflow:
3456 case Builtin::BI__builtin_umull_overflow:
3457 case Builtin::BI__builtin_umulll_overflow:
3458 IntrinsicId = llvm::Intrinsic::umul_with_overflow;
3459 break;
3460 case Builtin::BI__builtin_sadd_overflow:
3461 case Builtin::BI__builtin_saddl_overflow:
3462 case Builtin::BI__builtin_saddll_overflow:
3463 IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
3464 break;
3465 case Builtin::BI__builtin_ssub_overflow:
3466 case Builtin::BI__builtin_ssubl_overflow:
3467 case Builtin::BI__builtin_ssubll_overflow:
3468 IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
3469 break;
3470 case Builtin::BI__builtin_smul_overflow:
3471 case Builtin::BI__builtin_smull_overflow:
3472 case Builtin::BI__builtin_smulll_overflow:
3473 IntrinsicId = llvm::Intrinsic::smul_with_overflow;
3474 break;
3475 }

    llvm::Value *Carry;
3479 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
3480 Builder.CreateStore(Sum, SumOutPtr);
3481
3482 return RValue::get(Carry);
3483 }
3484 case Builtin::BI__builtin_addressof: {
3485 Value *Addr = EmitLValue(E->getArg(0)).getPointer();
3486 if (getLangOpts().getCheriBounds() >= LangOptions::CBM_SubObjectsSafe) {
3487 auto BoundedAddr = setCHERIBoundsOnAddrOf(Addr, E->getArg(0)->getType(),
3488 E->getArg(0), E);
3489 assert(BoundedAddr->getType() == Addr->getType());
3490 Addr = BoundedAddr;
3491 }
3492 return RValue::get(Addr);
3493 }
3494 case Builtin::BI__builtin_operator_new:
3495 return EmitBuiltinNewDeleteCall(
3496 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
3497 case Builtin::BI__builtin_operator_delete:
3498 return EmitBuiltinNewDeleteCall(
3499 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
3500
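  // Alignment builtins. For example (illustrative): __builtin_align_up(p, 16)
  // rounds p up to the next multiple of 16, while the p2 variants take the
  // alignment as a base-two exponent, so __builtin_p2align_up(p, 4) aligns
  // to 1 << 4 == 16.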
3501 case Builtin::BI__builtin_is_aligned:
3502 return EmitBuiltinIsAligned(E, false);
3503 case Builtin::BI__builtin_is_p2aligned:
3504 return EmitBuiltinIsAligned(E, true);
3505 case Builtin::BI__builtin_align_up:
3506 return EmitBuiltinAlignTo(E, false, true);
3507 case Builtin::BI__builtin_p2align_up:
3508 return EmitBuiltinAlignTo(E, true, true);
3509 case Builtin::BI__builtin_align_down:
3510 return EmitBuiltinAlignTo(E, false, false);
3511 case Builtin::BI__builtin_p2align_down:
3512 return EmitBuiltinAlignTo(E, true, false);
3513
3514 case Builtin::BI__noop:
3515 // __noop always evaluates to an integer literal zero.
3516 return RValue::get(ConstantInt::get(IntTy, 0));
3517 case Builtin::BI__builtin_call_with_static_chain: {
3518 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
3519 const Expr *Chain = E->getArg(1);
3520 return EmitCall(Call->getCallee()->getType(),
3521 EmitCallee(Call->getCallee()), Call, ReturnValue,
3522 EmitScalarExpr(Chain));
3523 }
3524 case Builtin::BI_InterlockedExchange8:
3525 case Builtin::BI_InterlockedExchange16:
3526 case Builtin::BI_InterlockedExchange:
3527 case Builtin::BI_InterlockedExchangePointer:
3528 return RValue::get(
3529 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
3530 case Builtin::BI_InterlockedCompareExchangePointer:
3531 case Builtin::BI_InterlockedCompareExchangePointer_nf: {
3532 llvm::Type *RTy;
3533 llvm::IntegerType *IntType =
3534 IntegerType::get(getLLVMContext(),
3535 getContext().getTypeSize(E->getType()));
3536 llvm::Type *IntPtrType = IntType->getPointerTo(DefaultAS);
3537
3538 llvm::Value *Destination =
3539 Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);
3540
3541 llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
3542 RTy = Exchange->getType();
3543 Exchange = Builder.CreatePtrToInt(Exchange, IntType);
3544
3545 llvm::Value *Comparand =
3546 Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);
3547
3548 auto Ordering =
3549 BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
3550 AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;
3551
3552 auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
3553 Ordering, Ordering);
3554 Result->setVolatile(true);
3555
3556 return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
3557 0),
3558 RTy));
3559 }
3560 case Builtin::BI_InterlockedCompareExchange8:
3561 case Builtin::BI_InterlockedCompareExchange16:
3562 case Builtin::BI_InterlockedCompareExchange:
3563 case Builtin::BI_InterlockedCompareExchange64:
3564 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
3565 case Builtin::BI_InterlockedIncrement16:
3566 case Builtin::BI_InterlockedIncrement:
3567 return RValue::get(
3568 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
3569 case Builtin::BI_InterlockedDecrement16:
3570 case Builtin::BI_InterlockedDecrement:
3571 return RValue::get(
3572 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
3573 case Builtin::BI_InterlockedAnd8:
3574 case Builtin::BI_InterlockedAnd16:
3575 case Builtin::BI_InterlockedAnd:
3576 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
3577 case Builtin::BI_InterlockedExchangeAdd8:
3578 case Builtin::BI_InterlockedExchangeAdd16:
3579 case Builtin::BI_InterlockedExchangeAdd:
3580 return RValue::get(
3581 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
3582 case Builtin::BI_InterlockedExchangeSub8:
3583 case Builtin::BI_InterlockedExchangeSub16:
3584 case Builtin::BI_InterlockedExchangeSub:
3585 return RValue::get(
3586 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
3587 case Builtin::BI_InterlockedOr8:
3588 case Builtin::BI_InterlockedOr16:
3589 case Builtin::BI_InterlockedOr:
3590 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
3591 case Builtin::BI_InterlockedXor8:
3592 case Builtin::BI_InterlockedXor16:
3593 case Builtin::BI_InterlockedXor:
3594 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
3595
3596 case Builtin::BI_bittest64:
3597 case Builtin::BI_bittest:
3598 case Builtin::BI_bittestandcomplement64:
3599 case Builtin::BI_bittestandcomplement:
3600 case Builtin::BI_bittestandreset64:
3601 case Builtin::BI_bittestandreset:
3602 case Builtin::BI_bittestandset64:
3603 case Builtin::BI_bittestandset:
3604 case Builtin::BI_interlockedbittestandreset:
3605 case Builtin::BI_interlockedbittestandreset64:
3606 case Builtin::BI_interlockedbittestandset64:
3607 case Builtin::BI_interlockedbittestandset:
3608 case Builtin::BI_interlockedbittestandset_acq:
3609 case Builtin::BI_interlockedbittestandset_rel:
3610 case Builtin::BI_interlockedbittestandset_nf:
3611 case Builtin::BI_interlockedbittestandreset_acq:
3612 case Builtin::BI_interlockedbittestandreset_rel:
3613 case Builtin::BI_interlockedbittestandreset_nf:
3614 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
3615
3616 // These builtins exist to emit regular volatile loads and stores not
3617 // affected by the -fms-volatile setting.
3618 case Builtin::BI__iso_volatile_load8:
3619 case Builtin::BI__iso_volatile_load16:
3620 case Builtin::BI__iso_volatile_load32:
3621 case Builtin::BI__iso_volatile_load64:
3622 return RValue::get(EmitISOVolatileLoad(*this, E));
3623 case Builtin::BI__iso_volatile_store8:
3624 case Builtin::BI__iso_volatile_store16:
3625 case Builtin::BI__iso_volatile_store32:
3626 case Builtin::BI__iso_volatile_store64:
3627 return RValue::get(EmitISOVolatileStore(*this, E));
3628
3629 case Builtin::BI__exception_code:
3630 case Builtin::BI_exception_code:
3631 return RValue::get(EmitSEHExceptionCode());
3632 case Builtin::BI__exception_info:
3633 case Builtin::BI_exception_info:
3634 return RValue::get(EmitSEHExceptionInfo());
3635 case Builtin::BI__abnormal_termination:
3636 case Builtin::BI_abnormal_termination:
3637 return RValue::get(EmitSEHAbnormalTermination());
3638 case Builtin::BI_setjmpex:
3639 if (getTarget().getTriple().isOSMSVCRT())
3640 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3641 break;
3642 case Builtin::BI_setjmp:
3643 if (getTarget().getTriple().isOSMSVCRT()) {
3644 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
3645 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
3646 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
3647 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
3648 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
3649 }
3650 break;
3651
3652 case Builtin::BI__GetExceptionInfo: {
3653 if (llvm::GlobalVariable *GV =
3654 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
3655 return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
3656 break;
3657 }
3658
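  // CHERI capability/pointer conversions. As a sketch of the intended
  // semantics: cap_from_pointer derives a capability from an integer pointer
  // value relative to an authorizing capability (the first argument), and
  // cap_to_pointer converts a capability back to an integer pointer relative
  // to one.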
3659 case Builtin::BI__builtin_cheri_cap_from_pointer: {
3660 Value *GlobalCap = EmitScalarExpr(E->getArg(0));
3661 Value *Ptr = Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(1)), IntPtrTy);
3662 return RValue::get(Builder.CreateCall(
3663 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_from_pointer, IntPtrTy),
3664 {GlobalCap, Ptr}));
3665 }
3666 case Builtin::BI__builtin_cheri_cap_to_pointer: {
3667 Value *GlobalCap = EmitScalarExpr(E->getArg(0));
3668 Value *Cap = EmitScalarExpr(E->getArg(1));
3669 Value *Ptr = Builder.CreateCall(
3670 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_to_pointer, IntPtrTy),
3671 {GlobalCap, Cap});
3672 return RValue::get(Builder.CreateIntToPtr(Ptr, ConvertType(E->getType())));
3673 }
3674
3675 case Builtin::BI__builtin_cheri_bounds_set:
3676 return RValue::get(Builder.CreateCall(
3677 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_bounds_set, SizeTy),
3678 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3679 case Builtin::BI__builtin_cheri_bounds_set_exact:
3680 return RValue::get(Builder.CreateCall(
3681 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_bounds_set_exact, SizeTy),
3682 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3683 case Builtin::BI__builtin_cheri_length_get:
3684 return RValue::get(Builder.CreateCall(
3685 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_length_get, SizeTy),
3686 {EmitScalarExpr(E->getArg(0))}));
3687 case Builtin::BI__builtin_cheri_base_get:
3688 return RValue::get(Builder.CreateCall(
3689 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_base_get, IntPtrTy),
3690 {EmitScalarExpr(E->getArg(0))}));
3691 case Builtin::BI__builtin_cheri_perms_and:
3692 return RValue::get(Builder.CreateCall(
3693 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_perms_and, SizeTy),
3694 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3695 case Builtin::BI__builtin_cheri_perms_get:
3696 return RValue::get(Builder.CreateCall(
3697 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_perms_get, SizeTy),
3698 {EmitScalarExpr(E->getArg(0))}));
3699 case Builtin::BI__builtin_cheri_flags_set:
3700 return RValue::get(Builder.CreateCall(
3701 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_flags_set, SizeTy),
3702 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3703 case Builtin::BI__builtin_cheri_flags_get:
3704 return RValue::get(Builder.CreateCall(
3705 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_flags_get, SizeTy),
3706 {EmitScalarExpr(E->getArg(0))}));
3707 case Builtin::BI__builtin_cheri_type_get:
3708 return RValue::get(Builder.CreateCall(
3709 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_type_get, IntPtrTy),
3710 {EmitScalarExpr(E->getArg(0))}));
3711 case Builtin::BI__builtin_cheri_perms_check:
3712 return RValue::get(Builder.CreateCall(
3713 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_perms_check, SizeTy),
3714 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3715 case Builtin::BI__builtin_cheri_offset_increment:
3716 return RValue::get(Builder.CreateCall(
3717 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_offset_increment, SizeTy),
3718 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3719 case Builtin::BI__builtin_cheri_offset_set:
3720 return RValue::get(Builder.CreateCall(
3721 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_offset_set, SizeTy),
3722 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3723 case Builtin::BI__builtin_cheri_offset_get:
3724 return RValue::get(Builder.CreateCall(
3725 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_offset_get, SizeTy),
3726 {EmitScalarExpr(E->getArg(0))}));
3727
3728 // Round to capability precision:
3729 // TODO: should we handle targets that don't have any precision constraints
3730 // here or in the backend?
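  //
  // A typical (illustrative) usage pattern when padding an allocation:
  //   size_t len  = __builtin_cheri_round_representable_length(size);
  //   size_t mask = __builtin_cheri_representable_alignment_mask(size);
  //   base = (base + ~mask) & mask; // align base so the bounds are exact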
3731 case Builtin::BI__builtin_cheri_round_representable_length:
3732 return RValue::get(Builder.CreateCall(
3733 CGM.getIntrinsic(llvm::Intrinsic::cheri_round_representable_length, {SizeTy}),
3734 {EmitScalarExpr(E->getArg(0))}));
3735 case Builtin::BI__builtin_cheri_representable_alignment_mask:
3736 return RValue::get(Builder.CreateCall(
3737 CGM.getIntrinsic(llvm::Intrinsic::cheri_representable_alignment_mask, {SizeTy}),
3738 {EmitScalarExpr(E->getArg(0))}));
3739
3740 case Builtin::BI__builtin_cheri_callback_create: {
3741 StringRef ClassName = cast<StringLiteral>(E->getArg(0))->getString();
3742 auto Fn = cast<DeclRefExpr>(E->getArg(2));
3743 StringRef FunctionName = cast<NamedDecl>(Fn->getDecl())->getName().str();
3744 auto *MethodNumVar =
3745 CGM.EmitSandboxRequiredMethod(ClassName, FunctionName);
    // Load the global and use it in the call.
3747 // FIXME: EmitSandboxRequiredMethod should return an Address so that we
3748 // don't have to know the alignment here.
3749 auto *MethodNum = Builder.CreateLoad(Address(MethodNumVar,
3750 CharUnits::fromQuantity(8)));
3751
3752 auto MethNoTy = llvm::Type::getInt64Ty(getLLVMContext());
3753 auto ClsTy = ConvertType(CGM.getContext().getCHERIClassType());
3754 auto ResultType = llvm::StructType::get(ClsTy, MethNoTy);
3755 LValue Obj = EmitAggExprToLValue(E->getArg(1));
3756 auto ClsVal = Builder.CreateBitCast(Obj.getAddress(),
3757 ClsTy->getPointerTo(CGM.getTargetCodeGenInfo().getDefaultAS()));
3758 llvm::Value *Struct = llvm::Constant::getNullValue(ResultType);
3759 llvm::Value *ObjVal = Builder.CreateLoad(ClsVal);
3760 ObjVal = Builder.CreateBitCast(ObjVal, ClsTy);
3761 Struct = Builder.CreateInsertValue(Struct, ObjVal, {0});
3762 Struct = Builder.CreateInsertValue(Struct, MethodNum, {1});
3763 return RValue::get(Struct);
3764 }
3765
3766 case Builtin::BI__builtin_cheri_address_get:
3767 return RValue::get(Builder.CreateCall(
3768 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_address_get, IntPtrTy),
3769 {EmitScalarExpr(E->getArg(0))}));
3770 case Builtin::BI__builtin_cheri_address_set:
3771 return RValue::get(Builder.CreateCall(
3772 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_address_set, IntPtrTy),
3773 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))}));
3774 case Builtin::BI__builtin_cheri_cap_load_tags:
3775 return RValue::get(Builder.CreateCall(
3776 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_load_tags, SizeTy),
3777 {EmitScalarExpr(E->getArg(0))}));
3778
3779 case Builtin::BI__fastfail:
3780 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
3781
3782 case Builtin::BI__builtin_coro_size: {
    auto &Context = getContext();
3784 auto SizeTy = Context.getSizeType();
3785 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3786 Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
3787 return RValue::get(Builder.CreateCall(F));
3788 }
3789
3790 case Builtin::BI__builtin_coro_id:
3791 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
3792 case Builtin::BI__builtin_coro_promise:
3793 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
3794 case Builtin::BI__builtin_coro_resume:
3795 return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
3796 case Builtin::BI__builtin_coro_frame:
3797 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
3798 case Builtin::BI__builtin_coro_noop:
3799 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
3800 case Builtin::BI__builtin_coro_free:
3801 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
3802 case Builtin::BI__builtin_coro_destroy:
3803 return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
3804 case Builtin::BI__builtin_coro_done:
3805 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
3806 case Builtin::BI__builtin_coro_alloc:
3807 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
3808 case Builtin::BI__builtin_coro_begin:
3809 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
3810 case Builtin::BI__builtin_coro_end:
3811 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
3812 case Builtin::BI__builtin_coro_suspend:
3813 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
3814 case Builtin::BI__builtin_coro_param:
3815 return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
3816
3817 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
3818 case Builtin::BIread_pipe:
3819 case Builtin::BIwrite_pipe: {
3820 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3821 *Arg1 = EmitScalarExpr(E->getArg(1));
3822 CGOpenCLRuntime OpenCLRT(CGM);
3823 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3824 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3825
3826 // Type of the generic packet parameter.
3827 unsigned GenericAS =
3828 CGM.getTargetAddressSpace(LangAS::opencl_generic);
3829 llvm::Type *I8PTy = llvm::PointerType::get(
3830 llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);
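    // The runtime entry points dispatched to below have roughly these
    // shapes, with the packet pointer passed as a generic i8*:
    //   int __read_pipe_2(pipe p, void *ptr, int size, int align);
    //   int __read_pipe_4(pipe p, reserve_id_t id, int index, void *ptr,
    //                     int size, int align);
    // (and likewise for the __write_pipe_* forms).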
3831
    // Determine which overloaded version of the call to generate.
3833 if (2U == E->getNumArgs()) {
3834 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
3835 : "__write_pipe_2";
      // Create a generic function type so the call works with any builtin or
      // user-defined type.
3838 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
3839 llvm::FunctionType *FTy = llvm::FunctionType::get(
3840 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3841 Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
3842 return RValue::get(
3843 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3844 {Arg0, BCast, PacketSize, PacketAlign}));
3845 } else {
3846 assert(4 == E->getNumArgs() &&
3847 "Illegal number of parameters to pipe function");
3848 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
3849 : "__write_pipe_4";
3850
3851 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
3852 Int32Ty, Int32Ty};
3853 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
3854 *Arg3 = EmitScalarExpr(E->getArg(3));
3855 llvm::FunctionType *FTy = llvm::FunctionType::get(
3856 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3857 Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
3858 // We know the third argument is an integer type, but we may need to cast
3859 // it to i32.
3860 if (Arg2->getType() != Int32Ty)
3861 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
3862 return RValue::get(Builder.CreateCall(
3863 CGM.CreateRuntimeFunction(FTy, Name),
3864 {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
3865 }
3866 }
  // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
  // functions
3869 case Builtin::BIreserve_read_pipe:
3870 case Builtin::BIreserve_write_pipe:
3871 case Builtin::BIwork_group_reserve_read_pipe:
3872 case Builtin::BIwork_group_reserve_write_pipe:
3873 case Builtin::BIsub_group_reserve_read_pipe:
3874 case Builtin::BIsub_group_reserve_write_pipe: {
3875 // Composing the mangled name for the function.
3876 const char *Name;
3877 if (BuiltinID == Builtin::BIreserve_read_pipe)
3878 Name = "__reserve_read_pipe";
3879 else if (BuiltinID == Builtin::BIreserve_write_pipe)
3880 Name = "__reserve_write_pipe";
3881 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
3882 Name = "__work_group_reserve_read_pipe";
3883 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
3884 Name = "__work_group_reserve_write_pipe";
3885 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
3886 Name = "__sub_group_reserve_read_pipe";
3887 else
3888 Name = "__sub_group_reserve_write_pipe";
3889
3890 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3891 *Arg1 = EmitScalarExpr(E->getArg(1));
3892 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
3893 CGOpenCLRuntime OpenCLRT(CGM);
3894 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3895 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3896
3897 // Building the generic function prototype.
3898 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
3899 llvm::FunctionType *FTy = llvm::FunctionType::get(
3900 ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3901 // We know the second argument is an integer type, but we may need to cast
3902 // it to i32.
3903 if (Arg1->getType() != Int32Ty)
3904 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
3905 return RValue::get(
3906 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3907 {Arg0, Arg1, PacketSize, PacketAlign}));
3908 }
3909 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
3910 // functions
3911 case Builtin::BIcommit_read_pipe:
3912 case Builtin::BIcommit_write_pipe:
3913 case Builtin::BIwork_group_commit_read_pipe:
3914 case Builtin::BIwork_group_commit_write_pipe:
3915 case Builtin::BIsub_group_commit_read_pipe:
3916 case Builtin::BIsub_group_commit_write_pipe: {
3917 const char *Name;
3918 if (BuiltinID == Builtin::BIcommit_read_pipe)
3919 Name = "__commit_read_pipe";
3920 else if (BuiltinID == Builtin::BIcommit_write_pipe)
3921 Name = "__commit_write_pipe";
3922 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
3923 Name = "__work_group_commit_read_pipe";
3924 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
3925 Name = "__work_group_commit_write_pipe";
3926 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
3927 Name = "__sub_group_commit_read_pipe";
3928 else
3929 Name = "__sub_group_commit_write_pipe";
3930
3931 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
3932 *Arg1 = EmitScalarExpr(E->getArg(1));
3933 CGOpenCLRuntime OpenCLRT(CGM);
3934 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3935 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3936
3937 // Building the generic function prototype.
3938 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
3939 llvm::FunctionType *FTy =
3940 llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
3941 llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3942
3943 return RValue::get(
3944 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3945 {Arg0, Arg1, PacketSize, PacketAlign}));
3946 }
3947 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
3948 case Builtin::BIget_pipe_num_packets:
3949 case Builtin::BIget_pipe_max_packets: {
3950 const char *BaseName;
3951 const PipeType *PipeTy = E->getArg(0)->getType()->getAs<PipeType>();
3952 if (BuiltinID == Builtin::BIget_pipe_num_packets)
3953 BaseName = "__get_pipe_num_packets";
3954 else
3955 BaseName = "__get_pipe_max_packets";
3956 auto Name = std::string(BaseName) +
3957 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
3958
3959 // Building the generic function prototype.
3960 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3961 CGOpenCLRuntime OpenCLRT(CGM);
3962 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
3963 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
3964 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
3965 llvm::FunctionType *FTy = llvm::FunctionType::get(
3966 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
3967
3968 return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
3969 {Arg0, PacketSize, PacketAlign}));
3970 }
3971
3972 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
3973 case Builtin::BIto_global:
3974 case Builtin::BIto_local:
3975 case Builtin::BIto_private: {
3976 auto Arg0 = EmitScalarExpr(E->getArg(0));
3977 auto NewArgT = llvm::PointerType::get(Int8Ty,
3978 CGM.getTargetAddressSpace(LangAS::opencl_generic));
3979 auto NewRetT = llvm::PointerType::get(Int8Ty,
3980 CGM.getTargetAddressSpace(E->getType()->getPointeeType().getAddressSpace()));
3981 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
3982 llvm::Value *NewArg;
3983 if (Arg0->getType()->getPointerAddressSpace() !=
3984 NewArgT->getPointerAddressSpace())
3985 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
3986 else
3987 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
3988 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
3989 auto NewCall =
3990 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
3991 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
3992 ConvertType(E->getType())));
3993 }
3994
3995 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
3996 // It contains four different overload formats specified in Table 6.13.17.1.
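  // A rough summary of the overloads handled below:
  //   enqueue_kernel(queue, flags, ndrange, block)
  //   enqueue_kernel(queue, flags, ndrange, block, size0, ...)
  //   enqueue_kernel(queue, flags, ndrange, nevents, waitlist, ev, block)
  //   enqueue_kernel(queue, flags, ndrange, nevents, waitlist, ev, block,
  //                  size0, ...)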
3997 case Builtin::BIenqueue_kernel: {
3998 StringRef Name; // Generated function call name
3999 unsigned NumArgs = E->getNumArgs();
4000
4001 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
4002 llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
4003 CGM.getTargetAddressSpace(LangAS::opencl_generic));
4004
4005 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
4006 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
4007 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
4008 llvm::Value *Range = NDRangeL.getAddress().getPointer();
4009 llvm::Type *RangeTy = NDRangeL.getAddress().getType();
4010
4011 if (NumArgs == 4) {
4012 // The most basic form of the call with parameters:
4013 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
4014 Name = "__enqueue_kernel_basic";
4015 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4016 GenericVoidPtrTy};
4017 llvm::FunctionType *FTy = llvm::FunctionType::get(
4018 Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4019
4020 auto Info =
4021 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4022 llvm::Value *Kernel =
4023 Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4024 llvm::Value *Block =
4025 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4026
4027 AttrBuilder B;
4028 B.addByValAttr(NDRangeL.getAddress().getElementType());
4029 llvm::AttributeList ByValAttrSet =
4030 llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4031
4032 auto RTCall =
4033 Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4034 {Queue, Flags, Range, Kernel, Block});
4035 RTCall->setAttributes(ByValAttrSet);
4036 return RValue::get(RTCall);
4037 }
4038 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4039
4040 // Create a temporary array to hold the sizes of local pointer arguments
4041 // for the block. \p First is the position of the first size argument.
4042 auto CreateArrayForSizeVar = [=](unsigned First)
4043 -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4044 llvm::APInt ArraySize(32, NumArgs - First);
4045 QualType SizeArrayTy = getContext().getConstantArrayType(
4046 getContext().getSizeType(), ArraySize, ArrayType::Normal,
4047 /*IndexTypeQuals=*/0);
4048 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4049 llvm::Value *TmpPtr = Tmp.getPointer();
4050 llvm::Value *TmpSize = EmitLifetimeStart(
4051 CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4052 llvm::Value *ElemPtr;
4053 // Each of the following arguments specifies the size of the corresponding
4054 // argument passed to the enqueued block.
4055 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4056 for (unsigned I = First; I < NumArgs; ++I) {
4057 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4058 auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
4059 if (I == First)
4060 ElemPtr = GEP;
4061 auto *V =
4062 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4063 Builder.CreateAlignedStore(
4064 V, GEP, CGM.getDataLayout().getPrefTypeAlignment(SizeTy));
4065 }
4066 return std::tie(ElemPtr, TmpSize, TmpPtr);
4067 };

    // Could have events and/or varargs.
    if (E->getArg(3)->getType()->isBlockPointerType()) {
      // No events passed, but has variadic arguments.
      Name = "__enqueue_kernel_varargs";
      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
      auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
      llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
      std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);

      // Create a vector of the arguments, as well as a constant value to
      // express to the runtime the number of variadic arguments.
      std::vector<llvm::Value *> Args = {
          Queue,  Flags, Range,
          Kernel, Block, ConstantInt::get(IntTy, NumArgs - 4),
          ElemPtr};
      std::vector<llvm::Type *> ArgTys = {
          QueueTy,          IntTy, RangeTy, GenericVoidPtrTy,
          GenericVoidPtrTy, IntTy, ElemPtr->getType()};

      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      auto Call =
          RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                                         llvm::ArrayRef<llvm::Value *>(Args)));
      if (TmpSize)
        EmitLifetimeEnd(TmpSize, TmpPtr);
      return Call;
    }
    // All remaining overloads take event arguments.
    if (NumArgs >= 7) {
      llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
      llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
          CGM.getTargetAddressSpace(LangAS::opencl_generic));

      llvm::Value *NumEvents =
          Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);

      // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
      // arguments to be null pointer constants (including a literal `0`),
      // we can detect that case and emit a null pointer directly.
      llvm::Value *EventWaitList = nullptr;
      if (E->getArg(4)->isNullPointerConstant(
              getContext(), Expr::NPC_ValueDependentIsNotNull)) {
        EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
      } else {
        EventWaitList = E->getArg(4)->getType()->isArrayType()
                        ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
                        : EmitScalarExpr(E->getArg(4));
        // Convert to generic address space.
        EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
      }
      llvm::Value *EventRet = nullptr;
      if (E->getArg(5)->isNullPointerConstant(
              getContext(), Expr::NPC_ValueDependentIsNotNull)) {
        EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
      } else {
        EventRet =
            Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
      }

      auto Info =
          CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
      llvm::Value *Kernel =
          Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
      llvm::Value *Block =
          Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);

      std::vector<llvm::Type *> ArgTys = {
          QueueTy,    Int32Ty,    RangeTy,          Int32Ty,
          EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};

      std::vector<llvm::Value *> Args = {Queue,     Flags,         Range,
                                         NumEvents, EventWaitList, EventRet,
                                         Kernel,    Block};

      if (NumArgs == 7) {
        // Has events but no variadics.
        Name = "__enqueue_kernel_basic_events";
        llvm::FunctionType *FTy = llvm::FunctionType::get(
            Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
        return RValue::get(
            Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                               llvm::ArrayRef<llvm::Value *>(Args)));
      }
      // Has event info and variadics.
      // Pass the number of variadics to the runtime function too.
      Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
      ArgTys.push_back(Int32Ty);
      Name = "__enqueue_kernel_events_varargs";

      llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
      std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
      Args.push_back(ElemPtr);
      ArgTys.push_back(ElemPtr->getType());

      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      auto Call =
          RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
                                         llvm::ArrayRef<llvm::Value *>(Args)));
      if (TmpSize)
        EmitLifetimeEnd(TmpSize, TmpPtr);
      return Call;
    }
    LLVM_FALLTHROUGH;
  }
  // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
  // parameter.
  case Builtin::BIget_kernel_work_group_size: {
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        CGM.getTargetAddressSpace(LangAS::opencl_generic));
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
    Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
    Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    return RValue::get(Builder.CreateCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(
                IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, false),
            "__get_kernel_work_group_size_impl"),
        {Kernel, Arg}));
  }
  case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        CGM.getTargetAddressSpace(LangAS::opencl_generic));
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
    Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
    Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    return RValue::get(Builder.CreateCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(
                IntTy, {GenericVoidPtrTy, GenericVoidPtrTy}, false),
            "__get_kernel_preferred_work_group_size_multiple_impl"),
        {Kernel, Arg}));
  }
  case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
  case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        CGM.getTargetAddressSpace(LangAS::opencl_generic));
    LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
    llvm::Value *NDRange = NDRangeL.getAddress().getPointer();
    auto Info =
        CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
    Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
    Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
    const char *Name =
        BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
            ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
            : "__get_kernel_sub_group_count_for_ndrange_impl";
    return RValue::get(Builder.CreateCall(
        CGM.CreateRuntimeFunction(
            llvm::FunctionType::get(
                IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
                false),
            Name),
        {NDRange, Kernel, Block}));
  }

  case Builtin::BI__builtin_store_half:
  case Builtin::BI__builtin_store_halff: {
    Value *Val = EmitScalarExpr(E->getArg(0));
    Address Address = EmitPointerWithAlignment(E->getArg(1));
    Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
    return RValue::get(Builder.CreateStore(HalfVal, Address));
  }
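  // The corresponding load builtins widen the in-memory half value:
  // __builtin_load_half extends to double, __builtin_load_halff to float.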
  case Builtin::BI__builtin_load_half: {
    Address Address = EmitPointerWithAlignment(E->getArg(0));
    Value *HalfVal = Builder.CreateLoad(Address);
    return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
  }
  case Builtin::BI__builtin_load_halff: {
    Address Address = EmitPointerWithAlignment(E->getArg(0));
    Value *HalfVal = Builder.CreateLoad(Address);
    return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
  }
  case Builtin::BIprintf:
    if (getTarget().getTriple().isNVPTX())
      return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
    break;
  case Builtin::BI__builtin_canonicalize:
  case Builtin::BI__builtin_canonicalizef:
  case Builtin::BI__builtin_canonicalizel:
    return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));

  case Builtin::BI__builtin_thread_pointer: {
    if (!getContext().getTargetInfo().isTLSSupported())
      CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
    // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
    break;
  }
  case Builtin::BI__builtin_os_log_format:
    return emitBuiltinOSLogFormat(*E);

  case Builtin::BI__xray_customevent: {
    if (!ShouldXRayInstrumentFunction())
      return RValue::getIgnored();

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::Custom))
      return RValue::getIgnored();

    if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
      if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
        return RValue::getIgnored();

    Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
    auto FTy = F->getFunctionType();
    auto Arg0 = E->getArg(0);
    auto Arg0Val = EmitScalarExpr(Arg0);
    auto Arg0Ty = Arg0->getType();
    auto PTy0 = FTy->getParamType(0);
    if (PTy0 != Arg0Val->getType()) {
      if (Arg0Ty->isArrayType())
        Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
      else
        Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
    }
    auto Arg1 = EmitScalarExpr(E->getArg(1));
    auto PTy1 = FTy->getParamType(1);
    if (PTy1 != Arg1->getType())
      Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
    return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
  }

  case Builtin::BI__xray_typedevent: {
    // TODO: There should be a way to always emit events even if the current
    // function is not instrumented. Losing events in a stream can cripple
    // a trace.
    if (!ShouldXRayInstrumentFunction())
      return RValue::getIgnored();

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::Typed))
      return RValue::getIgnored();

    if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
      if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
        return RValue::getIgnored();

    Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
    auto FTy = F->getFunctionType();
    auto Arg0 = EmitScalarExpr(E->getArg(0));
    auto PTy0 = FTy->getParamType(0);
    if (PTy0 != Arg0->getType())
      Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
    auto Arg1 = E->getArg(1);
    auto Arg1Val = EmitScalarExpr(Arg1);
    auto Arg1Ty = Arg1->getType();
    auto PTy1 = FTy->getParamType(1);
    if (PTy1 != Arg1Val->getType()) {
      if (Arg1Ty->isArrayType())
        Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
      else
        Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
    }
    auto Arg2 = EmitScalarExpr(E->getArg(2));
    auto PTy2 = FTy->getParamType(2);
    if (PTy2 != Arg2->getType())
      Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
    return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
  }

  case Builtin::BI__builtin_ms_va_start:
  case Builtin::BI__builtin_ms_va_end:
    return RValue::get(
        EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
                       BuiltinID == Builtin::BI__builtin_ms_va_start));

  case Builtin::BI__builtin_ms_va_copy: {
    // Lower this manually. We can't reliably determine whether or not any
    // given va_copy() is for a Win64 va_list from the calling convention
    // alone, because it's legal to do this from a System V ABI function.
    // With opaque pointer types, we won't have enough information in LLVM
    // IR to determine this from the argument types, either. Best to do it
    // now, while we have enough information.
    Address DestAddr = EmitMSVAListRef(E->getArg(0));
    Address SrcAddr = EmitMSVAListRef(E->getArg(1));

    llvm::Type *BPP = Int8PtrPtrTy;

    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
                       DestAddr.getAlignment());
    SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
                      SrcAddr.getAlignment());

    Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
    return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
  }
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));

  // Check that a call to a target specific builtin has the correct target
  // features.
  // This is done late so that non-target-specific builtins are never subject
  // to the check; if generic builtins ever start requiring target features,
  // the check can move to the beginning of the function.
  checkTargetFeatures(E, FD);

  if (unsigned VectorWidth =
          getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
    LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
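  // (LargestVectorWidth is presumed to feed the function's
  // "min-legal-vector-width" attribute when codegen of the function
  // finishes, so the backend knows which vector widths must stay legal.)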

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  StringRef Prefix =
      llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
  if (!Prefix.empty()) {
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
    // NOTE: we don't need to perform a compatibility flag check here, since
    // the MS builtins are declared in Builtins*.def via LANGBUILTIN with
    // ALL_MS_LANGUAGES and have already been filtered out earlier.
    if (IntrinsicID == Intrinsic::not_intrinsic)
      IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
  }
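  // For example, on x86 a "__builtin_ia32_*" builtin resolves through its
  // GCCBuiltin annotation in the intrinsic .td files to the matching
  // Intrinsic::x86_* entry.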

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst =
            E->getArg(i)->isIntegerConstantExpr(Result, getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type,
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        // XXX - vector of pointers?
        if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
          if (PtrTy->getAddressSpace() !=
              ArgValue->getType()->getPointerAddressSpace()) {
            ArgValue = Builder.CreateAddrSpaceCast(
                ArgValue,
                ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
          }
        }

        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      // XXX - vector of pointers?
      if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
        if (PtrTy->getAddressSpace() !=
            V->getType()->getPointerAddressSpace()) {
          V = Builder.CreateAddrSpaceCast(
              V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
        }
      }

      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  return GetUndefRValue(E->getType());
}

static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        llvm::Triple::ArchType Arch) {
  switch (Arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CGF->EmitARMBuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be:
    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CGF->EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::mips64:
  case llvm::Triple::cheri:
    return CGF->EmitMIPSBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
  case llvm::Triple::hexagon:
    return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return EmitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
        getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return EmitTargetArchBuiltinExpr(this, BuiltinID, E,
                                   getTarget().getTriple().getArch());
}

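// Map a NeonTypeFlags encoding to the corresponding LLVM vector type, e.g.
// {Int32, quad} -> <4 x i32> and {Float32, !quad} -> <2 x float>.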
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags,
                                     bool HasLegalHalfType = true,
                                     bool V1Ty = false) {
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Float16:
    if (HasLegalHalfType)
      return llvm::VectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
    else
      return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
  case NeonTypeFlags::Poly128:
    // FIXME: i128 and f128 are not fully supported in Clang and LLVM;
    // much of the i128/f128 API is still missing, so we represent poly128
    // as v16i8 and rely on pattern matching.
    return llvm::VectorType::get(CGF->Int8Ty, 16);
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Float64:
    return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
  }
  llvm_unreachable("Unknown vector element type!");
}

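// Map an integer Neon type to the floating-point vector type with the same
// element width and count, e.g. {Int32, quad} -> <4 x float>.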
static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
                                          NeonTypeFlags IntTypeFlags) {
  int IsQuad = IntTypeFlags.isQuad();
  switch (IntTypeFlags.getEltType()) {
  case NeonTypeFlags::Int16:
    return llvm::VectorType::get(CGF->HalfTy, (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->FloatTy, (2 << IsQuad));
  case NeonTypeFlags::Int64:
    return llvm::VectorType::get(CGF->DoubleTy, (1 << IsQuad));
  default:
    llvm_unreachable("Type can't be converted to floating-point!");
  }
}

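// Broadcast lane C of V to all lanes by shuffling V with a splat mask.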
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
  unsigned nElts = V->getType()->getVectorNumElements();
  Value *SV = llvm::ConstantVector::getSplat(nElts, C);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}

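// Materialize a Neon shift amount as a vector constant, negated for right
// shifts; ConstantInt::get on a vector type produces a splat of the value.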
Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  int SV = cast<ConstantInt>(V)->getSExtValue();
  return ConstantInt::get(Ty, neg ? -SV : SV);
}

// Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);

  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
  int EltSize = VTy->getScalarSizeInBits();

  Vec = Builder.CreateBitCast(Vec, Ty);

  // lshr/ashr are undefined when the shift amount is equal to the vector
  // element size.
  if (ShiftAmt == EltSize) {
    if (usgn) {
      // Right-shifting an unsigned value by its size yields 0.
      return llvm::ConstantAggregateZero::get(VTy);
    } else {
      // Right-shifting a signed value by its size is equivalent
      // to a shift of size-1.
      --ShiftAmt;
      Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    }
  }

  Shift = EmitNeonShiftVector(Shift, Ty, false);
  if (usgn)
    return Builder.CreateLShr(Vec, Shift, name);
  else
    return Builder.CreateAShr(Vec, Shift, name);
}

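// Type-modifier flags for the Neon intrinsic maps below: they describe which
// of the call's types (return value, first argument, first two arguments)
// take part in the mapped intrinsic's overload signature and how they are
// transformed.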
enum {
  AddRetType = (1 << 0),
  Add1ArgType = (1 << 1),
  Add2ArgTypes = (1 << 2),

  VectorizeRetType = (1 << 3),
  VectorizeArgTypes = (1 << 4),

  InventFloatType = (1 << 5),
  UnsignedAlts = (1 << 6),

  Use64BitVectors = (1 << 7),
  Use128BitVectors = (1 << 8),

  Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
  VectorRet = AddRetType | VectorizeRetType,
  VectorRetGetArgs01 =
      AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
  FpCmpzModifiers =
      AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};

namespace {
struct NeonIntrinsicInfo {
  const char *NameHint;
  unsigned BuiltinID;
  unsigned LLVMIntrinsic;
  unsigned AltLLVMIntrinsic;
  unsigned TypeModifier;

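  // Both orderings compare BuiltinID so that sorted tables of entries can be
  // binary-searched by builtin ID.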
  bool operator<(unsigned RHSBuiltinID) const {
    return BuiltinID < RHSBuiltinID;
  }
  bool operator<(const NeonIntrinsicInfo &TE) const {
    return BuiltinID < TE.BuiltinID;
  }
};
} // end anonymous namespace

#define NEONMAP0(NameBase) \
  { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }

#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
  { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, 0, TypeModifier }

#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
  { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
    TypeModifier }
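// For example, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs,
//     0, 0 }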

static const NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
  NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vabs_v, arm_neon_vabs, 0),
  NEONMAP1(vabsq_v, arm_neon_vabs, 0),
  NEONMAP0(vaddhn_v),
  NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
  NEONMAP1(vaeseq_v, arm_neon_aese, 0),
  NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
  NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
  NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vcage_v, arm_neon_vacge, 0),
  NEONMAP1(vcageq_v, arm_neon_vacge, 0),
  NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcale_v, arm_neon_vacge, 0),
  NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
  NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
  NEONMAP0(vceqz_v),
  NEONMAP0(vceqzq_v),
  NEONMAP0(vcgez_v),
  NEONMAP0(vcgezq_v),
  NEONMAP0(vcgtz_v),
  NEONMAP0(vcgtzq_v),
  NEONMAP0(vclez_v),
  NEONMAP0(vclezq_v),
  NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
  NEONMAP0(vcltz_v),
  NEONMAP0(vcltzq_v),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
  NEONMAP0(vcvt_f16_v),
  NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvt_s16_v),
  NEONMAP0(vcvt_s32_v),
  NEONMAP0(vcvt_s64_v),
  NEONMAP0(vcvt_u16_v),
  NEONMAP0(vcvt_u32_v),
  NEONMAP0(vcvt_u64_v),
  NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP0(vcvtq_f16_v),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_s16_v),
  NEONMAP0(vcvtq_s32_v),
  NEONMAP0(vcvtq_s64_v),
  NEONMAP0(vcvtq_u16_v),
  NEONMAP0(vcvtq_u32_v),
  NEONMAP0(vcvtq_u64_v),
  NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
  NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
  NEONMAP0(vext_v),
  NEONMAP0(vextq_v),
  NEONMAP0(vfma_v),
  NEONMAP0(vfmaq_v),
  NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP0(vld1_dup_v),
  NEONMAP1(vld1_v, arm_neon_vld1, 0),
  NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
  NEONMAP0(vld1q_dup_v),
  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
  NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
  NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
  NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
  NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2_v, arm_neon_vld2, 0),
  NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
  NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3_v, arm_neon_vld3, 0),
  NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
  NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4_v, arm_neon_vld4, 0),
  NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
  NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP0(vmovl_v),
  NEONMAP0(vmovn_v),
  NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP0(vmull_v),
  NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
  NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
  NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP0(vrndi_v),
  NEONMAP0(vrndiq_v),
  NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
  NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
  NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
  NEONMAP0(vshl_n_v),
  NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshr_n_v),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_v, arm_neon_vst1, 0),
  NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
  NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
  NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
  NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2_v, arm_neon_vst2, 0),
  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3_v, arm_neon_vst3, 0),
  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4_v, arm_neon_vst4, 0),
  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
  NEONMAP0(vsubhn_v),
  NEONMAP0(vtrn_v),
  NEONMAP0(vtrnq_v),
  NEONMAP0(vtst_v),
  NEONMAP0(vtstq_v),
  NEONMAP0(vuzp_v),
  NEONMAP0(vuzpq_v),
  NEONMAP0(vzip_v),
  NEONMAP0(vzipq_v)
};

static const NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
  NEONMAP1(vabs_v, aarch64_neon_abs, 0),
  NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
  NEONMAP0(vaddhn_v),
  NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
  NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
  NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
  NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
  NEONMAP1(vcage_v, aarch64_neon_facge, 0),
  NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcale_v, aarch64_neon_facge, 0),
  NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
  NEONMAP0(vceqz_v),
  NEONMAP0(vceqzq_v),
  NEONMAP0(vcgez_v),
  NEONMAP0(vcgezq_v),
  NEONMAP0(vcgtz_v),
  NEONMAP0(vcgtzq_v),
  NEONMAP0(vclez_v),
  NEONMAP0(vclezq_v),
  NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP0(vcltz_v),
  NEONMAP0(vcltzq_v),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
  NEONMAP0(vcvt_f16_v),
  NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_f16_v),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
  NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
  NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
  NEONMAP0(vext_v),
  NEONMAP0(vextq_v),
  NEONMAP0(vfma_v),
  NEONMAP0(vfmaq_v),
  NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
  NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
  NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
  NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
  NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
  NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
  NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
  NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
  NEONMAP0(vmovl_v),
  NEONMAP0(vmovn_v),
  NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
  NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
  NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
  NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP0(vrndi_v),
  NEONMAP0(vrndiq_v),
  NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
  NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
  NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
  NEONMAP0(vshl_n_v),
  NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshr_n_v),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
  NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
  NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
  NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
  NEONMAP0(vsubhn_v),
  NEONMAP0(vtst_v),
  NEONMAP0(vtstq_v),
};

static const NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
  NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
  NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
  NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
5309 NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
5310 NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
5311 NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
5312 NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5313 NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
5314 NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
5315 NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
5316 NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
5317 NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
5318 NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5319 NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
5320 NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
5321 NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
  // FP16 scalar intrinsics go here.
5323 NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
5324 NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5325 NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
5326 NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5327 NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
5328 NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5329 NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
5330 NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5331 NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
5332 NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5333 NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
5334 NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5335 NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
5336 NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5337 NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
5338 NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5339 NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
5340 NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5341 NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
5342 NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5343 NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
5344 NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5345 NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
5346 NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5347 NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
5348 NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
5349 NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
5350 NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
5351 NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
5352 NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
5353};
5354
5355#undef NEONMAP0
5356#undef NEONMAP1
5357#undef NEONMAP2
5358
5359static bool NEONSIMDIntrinsicsProvenSorted = false;
5360
5361static bool AArch64SIMDIntrinsicsProvenSorted = false;
5362static bool AArch64SISDIntrinsicsProvenSorted = false;
5363
5365static const NeonIntrinsicInfo *
5366findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
5367 unsigned BuiltinID, bool &MapProvenSorted) {
5368
5369#ifndef NDEBUG
5370 if (!MapProvenSorted) {
5371 assert(std::is_sorted(std::begin(IntrinsicMap), std::end(IntrinsicMap)));
5372 MapProvenSorted = true;
5373 }
5374#endif
5375
5376 const NeonIntrinsicInfo *Builtin =
5377 std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);
5378
5379 if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
5380 return Builtin;
5381
5382 return nullptr;
5383}
5384
5385Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
5386 unsigned Modifier,
5387 llvm::Type *ArgType,
5388 const CallExpr *E) {
5389 int VectorSize = 0;
5390 if (Modifier & Use64BitVectors)
5391 VectorSize = 64;
5392 else if (Modifier & Use128BitVectors)
5393 VectorSize = 128;
5394
5395 // Return type.
5396 SmallVector<llvm::Type *, 3> Tys;
5397 if (Modifier & AddRetType) {
5398 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
5399 if (Modifier & VectorizeRetType)
5400 Ty = llvm::VectorType::get(
5401 Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
5402
5403 Tys.push_back(Ty);
5404 }
5405
5406 // Arguments.
5407 if (Modifier & VectorizeArgTypes) {
5408 int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
5409 ArgType = llvm::VectorType::get(ArgType, Elts);
5410 }
5411
5412 if (Modifier & (Add1ArgType | Add2ArgTypes))
5413 Tys.push_back(ArgType);
5414
5415 if (Modifier & Add2ArgTypes)
5416 Tys.push_back(ArgType);
5417
5418 if (Modifier & InventFloatType)
5419 Tys.push_back(FloatTy);
5420
5421 return CGM.getIntrinsic(IntrinsicID, Tys);
5422}
5423
5424static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
5425 const NeonIntrinsicInfo &SISDInfo,
5426 SmallVectorImpl<Value *> &Ops,
5427 const CallExpr *E) {
5428 unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned Int = SISDInfo.LLVMIntrinsic;
5430 unsigned Modifier = SISDInfo.TypeModifier;
5431 const char *s = SISDInfo.NameHint;
5432
5433 switch (BuiltinID) {
5434 case NEON::BI__builtin_neon_vcled_s64:
5435 case NEON::BI__builtin_neon_vcled_u64:
5436 case NEON::BI__builtin_neon_vcles_f32:
5437 case NEON::BI__builtin_neon_vcled_f64:
5438 case NEON::BI__builtin_neon_vcltd_s64:
5439 case NEON::BI__builtin_neon_vcltd_u64:
5440 case NEON::BI__builtin_neon_vclts_f32:
5441 case NEON::BI__builtin_neon_vcltd_f64:
5442 case NEON::BI__builtin_neon_vcales_f32:
5443 case NEON::BI__builtin_neon_vcaled_f64:
5444 case NEON::BI__builtin_neon_vcalts_f32:
5445 case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of comparison actually exists: cmle is a cmge with
    // swapped operands. The table gives us the right intrinsic, but we still
    // need to do the swap.
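    // For example, vcled_s64(a, b) is emitted as the corresponding
    // greater-than-or-equal comparison applied to (b, a).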
5449 std::swap(Ops[0], Ops[1]);
5450 break;
5451 }
5452
5453 assert(Int && "Generic code assumes a valid intrinsic");
5454
5455 // Determine the type(s) of this overloaded AArch64 intrinsic.
5456 const Expr *Arg = E->getArg(0);
5457 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
5458 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
5459
5460 int j = 0;
5461 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
5462 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5463 ai != ae; ++ai, ++j) {
5464 llvm::Type *ArgTy = ai->getType();
5465 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
5466 ArgTy->getPrimitiveSizeInBits())
5467 continue;
5468
5469 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
5470 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
5471 // it before inserting.
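    // For example (illustrative widths): an i32 immediate destined for a
    // <4 x i16> argument is truncated to i16 and inserted into lane 0 of an
    // undef vector below.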
5472 Ops[j] =
5473 CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
5474 Ops[j] =
5475 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
5476 }
5477
5478 Value *Result = CGF.EmitNeonCall(F, Ops, s);
5479 llvm::Type *ResultType = CGF.ConvertType(E->getType());
5480 if (ResultType->getPrimitiveSizeInBits() <
5481 Result->getType()->getPrimitiveSizeInBits())
5482 return CGF.Builder.CreateExtractElement(Result, C0);
5483
5484 return CGF.Builder.CreateBitCast(Result, ResultType, s);
5485}
5486
5487Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
5488 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
5489 const char *NameHint, unsigned Modifier, const CallExpr *E,
5490 SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
5491 llvm::Triple::ArchType Arch) {
5492 // Get the last argument, which specifies the vector type.
5493 llvm::APSInt NeonTypeConst;
5494 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
5495 if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
5496 return nullptr;
5497
5498 // Determine the type of this overloaded NEON intrinsic.
5499 NeonTypeFlags Type(NeonTypeConst.getZExtValue());
5500 bool Usgn = Type.isUnsigned();
5501 bool Quad = Type.isQuad();
5502 const bool HasLegalHalfType = getTarget().hasLegalHalfType();
5503
5504 llvm::VectorType *VTy = GetNeonType(this, Type, HasLegalHalfType);
5505 llvm::Type *Ty = VTy;
5506 if (!Ty)
5507 return nullptr;
5508
5509 auto getAlignmentValue32 = [&](Address addr) -> Value* {
5510 return Builder.getInt32(addr.getAlignment().getQuantity());
5511 };
5512
5513 unsigned Int = LLVMIntrinsic;
5514 if ((Modifier & UnsignedAlts) && !Usgn)
5515 Int = AltLLVMIntrinsic;
5516
5517 switch (BuiltinID) {
5518 default: break;
5519 case NEON::BI__builtin_neon_vpadd_v:
5520 case NEON::BI__builtin_neon_vpaddq_v:
5521 // We don't allow fp/int overloading of intrinsics.
5522 if (VTy->getElementType()->isFloatingPointTy() &&
5523 Int == Intrinsic::aarch64_neon_addp)
5524 Int = Intrinsic::aarch64_neon_faddp;
5525 break;
5526 case NEON::BI__builtin_neon_vabs_v:
5527 case NEON::BI__builtin_neon_vabsq_v:
5528 if (VTy->getElementType()->isFloatingPointTy())
5529 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
5530 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
5531 case NEON::BI__builtin_neon_vaddhn_v: {
5532 llvm::VectorType *SrcTy =
5533 llvm::VectorType::getExtendedElementVectorType(VTy);
5534
5535 // %sum = add <4 x i32> %lhs, %rhs
5536 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5537 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5538 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
5539
5540 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
5541 Constant *ShiftAmt =
5542 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5543 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
5544
5545 // %res = trunc <4 x i32> %high to <4 x i16>
5546 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
5547 }
5548 case NEON::BI__builtin_neon_vcale_v:
5549 case NEON::BI__builtin_neon_vcaleq_v:
5550 case NEON::BI__builtin_neon_vcalt_v:
5551 case NEON::BI__builtin_neon_vcaltq_v:
5552 std::swap(Ops[0], Ops[1]);
5553 LLVM_FALLTHROUGH;
5554 case NEON::BI__builtin_neon_vcage_v:
5555 case NEON::BI__builtin_neon_vcageq_v:
5556 case NEON::BI__builtin_neon_vcagt_v:
5557 case NEON::BI__builtin_neon_vcagtq_v: {
5558 llvm::Type *Ty;
5559 switch (VTy->getScalarSizeInBits()) {
5560 default: llvm_unreachable("unexpected type");
5561 case 32:
5562 Ty = FloatTy;
5563 break;
5564 case 64:
5565 Ty = DoubleTy;
5566 break;
5567 case 16:
5568 Ty = HalfTy;
5569 break;
5570 }
5571 llvm::Type *VecFlt = llvm::VectorType::get(Ty, VTy->getNumElements());
5572 llvm::Type *Tys[] = { VTy, VecFlt };
5573 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5574 return EmitNeonCall(F, Ops, NameHint);
5575 }
5576 case NEON::BI__builtin_neon_vceqz_v:
5577 case NEON::BI__builtin_neon_vceqzq_v:
5578 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
5579 ICmpInst::ICMP_EQ, "vceqz");
5580 case NEON::BI__builtin_neon_vcgez_v:
5581 case NEON::BI__builtin_neon_vcgezq_v:
5582 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
5583 ICmpInst::ICMP_SGE, "vcgez");
5584 case NEON::BI__builtin_neon_vclez_v:
5585 case NEON::BI__builtin_neon_vclezq_v:
5586 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
5587 ICmpInst::ICMP_SLE, "vclez");
5588 case NEON::BI__builtin_neon_vcgtz_v:
5589 case NEON::BI__builtin_neon_vcgtzq_v:
5590 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
5591 ICmpInst::ICMP_SGT, "vcgtz");
5592 case NEON::BI__builtin_neon_vcltz_v:
5593 case NEON::BI__builtin_neon_vcltzq_v:
5594 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
5595 ICmpInst::ICMP_SLT, "vcltz");
5596 case NEON::BI__builtin_neon_vclz_v:
5597 case NEON::BI__builtin_neon_vclzq_v:
    // We generate a target-independent intrinsic, which needs a second
    // argument specifying whether clz of zero is undefined; on ARM it isn't.
5600 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
5601 break;
5602 case NEON::BI__builtin_neon_vcvt_f32_v:
5603 case NEON::BI__builtin_neon_vcvtq_f32_v:
5604 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5605 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
5606 HasLegalHalfType);
5607 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5608 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5609 case NEON::BI__builtin_neon_vcvt_f16_v:
5610 case NEON::BI__builtin_neon_vcvtq_f16_v:
5611 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5612 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
5613 HasLegalHalfType);
5614 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
5615 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
5616 case NEON::BI__builtin_neon_vcvt_n_f16_v:
5617 case NEON::BI__builtin_neon_vcvt_n_f32_v:
5618 case NEON::BI__builtin_neon_vcvt_n_f64_v:
5619 case NEON::BI__builtin_neon_vcvtq_n_f16_v:
5620 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
5621 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
5622 llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
5623 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
5624 Function *F = CGM.getIntrinsic(Int, Tys);
5625 return EmitNeonCall(F, Ops, "vcvt_n");
5626 }
5627 case NEON::BI__builtin_neon_vcvt_n_s16_v:
5628 case NEON::BI__builtin_neon_vcvt_n_s32_v:
5629 case NEON::BI__builtin_neon_vcvt_n_u16_v:
5630 case NEON::BI__builtin_neon_vcvt_n_u32_v:
5631 case NEON::BI__builtin_neon_vcvt_n_s64_v:
5632 case NEON::BI__builtin_neon_vcvt_n_u64_v:
5633 case NEON::BI__builtin_neon_vcvtq_n_s16_v:
5634 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
5635 case NEON::BI__builtin_neon_vcvtq_n_u16_v:
5636 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
5637 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
5638 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
5639 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5640 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5641 return EmitNeonCall(F, Ops, "vcvt_n");
5642 }
5643 case NEON::BI__builtin_neon_vcvt_s32_v:
5644 case NEON::BI__builtin_neon_vcvt_u32_v:
5645 case NEON::BI__builtin_neon_vcvt_s64_v:
5646 case NEON::BI__builtin_neon_vcvt_u64_v:
5647 case NEON::BI__builtin_neon_vcvt_s16_v:
5648 case NEON::BI__builtin_neon_vcvt_u16_v:
5649 case NEON::BI__builtin_neon_vcvtq_s32_v:
5650 case NEON::BI__builtin_neon_vcvtq_u32_v:
5651 case NEON::BI__builtin_neon_vcvtq_s64_v:
5652 case NEON::BI__builtin_neon_vcvtq_u64_v:
5653 case NEON::BI__builtin_neon_vcvtq_s16_v:
5654 case NEON::BI__builtin_neon_vcvtq_u16_v: {
5655 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
5656 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
5657 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
5658 }
5659 case NEON::BI__builtin_neon_vcvta_s16_v:
5660 case NEON::BI__builtin_neon_vcvta_s32_v:
5661 case NEON::BI__builtin_neon_vcvta_s64_v:
5662 case NEON::BI__builtin_neon_vcvta_u16_v:
5663 case NEON::BI__builtin_neon_vcvta_u32_v:
5664 case NEON::BI__builtin_neon_vcvta_u64_v:
5665 case NEON::BI__builtin_neon_vcvtaq_s16_v:
5666 case NEON::BI__builtin_neon_vcvtaq_s32_v:
5667 case NEON::BI__builtin_neon_vcvtaq_s64_v:
5668 case NEON::BI__builtin_neon_vcvtaq_u16_v:
5669 case NEON::BI__builtin_neon_vcvtaq_u32_v:
5670 case NEON::BI__builtin_neon_vcvtaq_u64_v:
5671 case NEON::BI__builtin_neon_vcvtn_s16_v:
5672 case NEON::BI__builtin_neon_vcvtn_s32_v:
5673 case NEON::BI__builtin_neon_vcvtn_s64_v:
5674 case NEON::BI__builtin_neon_vcvtn_u16_v:
5675 case NEON::BI__builtin_neon_vcvtn_u32_v:
5676 case NEON::BI__builtin_neon_vcvtn_u64_v:
5677 case NEON::BI__builtin_neon_vcvtnq_s16_v:
5678 case NEON::BI__builtin_neon_vcvtnq_s32_v:
5679 case NEON::BI__builtin_neon_vcvtnq_s64_v:
5680 case NEON::BI__builtin_neon_vcvtnq_u16_v:
5681 case NEON::BI__builtin_neon_vcvtnq_u32_v:
5682 case NEON::BI__builtin_neon_vcvtnq_u64_v:
5683 case NEON::BI__builtin_neon_vcvtp_s16_v:
5684 case NEON::BI__builtin_neon_vcvtp_s32_v:
5685 case NEON::BI__builtin_neon_vcvtp_s64_v:
5686 case NEON::BI__builtin_neon_vcvtp_u16_v:
5687 case NEON::BI__builtin_neon_vcvtp_u32_v:
5688 case NEON::BI__builtin_neon_vcvtp_u64_v:
5689 case NEON::BI__builtin_neon_vcvtpq_s16_v:
5690 case NEON::BI__builtin_neon_vcvtpq_s32_v:
5691 case NEON::BI__builtin_neon_vcvtpq_s64_v:
5692 case NEON::BI__builtin_neon_vcvtpq_u16_v:
5693 case NEON::BI__builtin_neon_vcvtpq_u32_v:
5694 case NEON::BI__builtin_neon_vcvtpq_u64_v:
5695 case NEON::BI__builtin_neon_vcvtm_s16_v:
5696 case NEON::BI__builtin_neon_vcvtm_s32_v:
5697 case NEON::BI__builtin_neon_vcvtm_s64_v:
5698 case NEON::BI__builtin_neon_vcvtm_u16_v:
5699 case NEON::BI__builtin_neon_vcvtm_u32_v:
5700 case NEON::BI__builtin_neon_vcvtm_u64_v:
5701 case NEON::BI__builtin_neon_vcvtmq_s16_v:
5702 case NEON::BI__builtin_neon_vcvtmq_s32_v:
5703 case NEON::BI__builtin_neon_vcvtmq_s64_v:
5704 case NEON::BI__builtin_neon_vcvtmq_u16_v:
5705 case NEON::BI__builtin_neon_vcvtmq_u32_v:
5706 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
5707 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
5708 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
5709 }
5710 case NEON::BI__builtin_neon_vext_v:
5711 case NEON::BI__builtin_neon_vextq_v: {
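    // vext concatenates the two inputs and extracts a vector starting at the
    // given lane, e.g. vext(<a0,a1,a2,a3>, <b0,b1,b2,b3>, 1) yields
    // <a1,a2,a3,b0>, i.e. shuffle indices 1,2,3,4.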
5712 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
5713 SmallVector<uint32_t, 16> Indices;
5714 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5715 Indices.push_back(i+CV);
5716
5717 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5718 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5719 return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
5720 }
5721 case NEON::BI__builtin_neon_vfma_v:
5722 case NEON::BI__builtin_neon_vfmaq_v: {
5723 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
5724 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5725 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5726 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5727
5728 // NEON intrinsic puts accumulator first, unlike the LLVM fma.
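    // e.g. vfma(a, b, c) computes b * c + a, so we emit llvm.fma(b, c, a).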
5729 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
5730 }
5731 case NEON::BI__builtin_neon_vld1_v:
5732 case NEON::BI__builtin_neon_vld1q_v: {
5733 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5734 Ops.push_back(getAlignmentValue32(PtrOp0));
5735 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
5736 }
5737 case NEON::BI__builtin_neon_vld1_x2_v:
5738 case NEON::BI__builtin_neon_vld1q_x2_v:
5739 case NEON::BI__builtin_neon_vld1_x3_v:
5740 case NEON::BI__builtin_neon_vld1q_x3_v:
5741 case NEON::BI__builtin_neon_vld1_x4_v:
5742 case NEON::BI__builtin_neon_vld1q_x4_v: {
5743 llvm::Type *PTy = CGM.getPointerInDefaultAS(VTy->getVectorElementType());
5744 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5745 llvm::Type *Tys[2] = { VTy, PTy };
5746 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5747 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
5748 Ty = CGM.getPointerInDefaultAS(Ops[1]->getType());
5749 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5750 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5751 }
5752 case NEON::BI__builtin_neon_vld2_v:
5753 case NEON::BI__builtin_neon_vld2q_v:
5754 case NEON::BI__builtin_neon_vld3_v:
5755 case NEON::BI__builtin_neon_vld3q_v:
5756 case NEON::BI__builtin_neon_vld4_v:
5757 case NEON::BI__builtin_neon_vld4q_v:
5758 case NEON::BI__builtin_neon_vld2_dup_v:
5759 case NEON::BI__builtin_neon_vld2q_dup_v:
5760 case NEON::BI__builtin_neon_vld3_dup_v:
5761 case NEON::BI__builtin_neon_vld3q_dup_v:
5762 case NEON::BI__builtin_neon_vld4_dup_v:
5763 case NEON::BI__builtin_neon_vld4q_dup_v: {
5764 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5765 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5766 Value *Align = getAlignmentValue32(PtrOp1);
5767 Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
5768 Ty = CGM.getPointerInDefaultAS(Ops[1]->getType());
5769 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5770 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5771 }
5772 case NEON::BI__builtin_neon_vld1_dup_v:
5773 case NEON::BI__builtin_neon_vld1q_dup_v: {
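    // vld1_dup loads a single element and splats it across all lanes of the
    // result vector.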
5774 Value *V = UndefValue::get(Ty);
5775 Ty = CGM.getPointerInDefaultAS(VTy->getElementType());
5776 PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
5777 LoadInst *Ld = Builder.CreateLoad(PtrOp0);
5778 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
5779 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
5780 return EmitNeonSplat(Ops[0], CI);
5781 }
5782 case NEON::BI__builtin_neon_vld2_lane_v:
5783 case NEON::BI__builtin_neon_vld2q_lane_v:
5784 case NEON::BI__builtin_neon_vld3_lane_v:
5785 case NEON::BI__builtin_neon_vld3q_lane_v:
5786 case NEON::BI__builtin_neon_vld4_lane_v:
5787 case NEON::BI__builtin_neon_vld4q_lane_v: {
5788 llvm::Type *Tys[] = {Ty, Int8PtrTy};
5789 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
5790 for (unsigned I = 2; I < Ops.size() - 1; ++I)
5791 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
5792 Ops.push_back(getAlignmentValue32(PtrOp1));
5793 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
5794 Ty = CGM.getPointerInDefaultAS(Ops[1]->getType());
5795 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5796 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
5797 }
5798 case NEON::BI__builtin_neon_vmovl_v: {
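    // vmovl widens each element to twice its size, e.g. <4 x i16> -> <4 x i32>
    // via sext (or zext for the unsigned variants).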
    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
5800 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
5801 if (Usgn)
5802 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
5803 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
5804 }
5805 case NEON::BI__builtin_neon_vmovn_v: {
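    // vmovn narrows each element to half its size, e.g. <4 x i32> -> <4 x i16>
    // via trunc.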
5806 llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
5807 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
5808 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
5809 }
5810 case NEON::BI__builtin_neon_vmull_v:
5811 // FIXME: the integer vmull operations could be emitted in terms of pure
5812 // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
5813 // hoisting the exts outside loops. Until global ISel comes along that can
5814 // see through such movement this leads to bad CodeGen. So we need an
5815 // intrinsic for now.
5816 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
5817 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
5818 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
5819 case NEON::BI__builtin_neon_vpadal_v:
5820 case NEON::BI__builtin_neon_vpadalq_v: {
5821 // The source operand type has twice as many elements of half the size.
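    // e.g. vpadal_s8 pairwise-adds adjacent <8 x i8> elements and accumulates
    // the sums into a <4 x i16> operand.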
5822 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5823 llvm::Type *EltTy =
5824 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5825 llvm::Type *NarrowTy =
5826 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
5827 llvm::Type *Tys[2] = { Ty, NarrowTy };
5828 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
5829 }
5830 case NEON::BI__builtin_neon_vpaddl_v:
5831 case NEON::BI__builtin_neon_vpaddlq_v: {
5832 // The source operand type has twice as many elements of half the size.
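    // e.g. vpaddl_s8 pairwise-adds adjacent <8 x i8> elements into <4 x i16>.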
5833 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
5834 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
5835 llvm::Type *NarrowTy =
5836 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
5837 llvm::Type *Tys[2] = { Ty, NarrowTy };
5838 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
5839 }
5840 case NEON::BI__builtin_neon_vqdmlal_v:
5841 case NEON::BI__builtin_neon_vqdmlsl_v: {
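    // These are two-step operations: first a saturating doubling multiply
    // (long) of the two multiplicands, then a saturating add/subtract into
    // the accumulator.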
5842 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
5843 Ops[1] =
5844 EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
5845 Ops.resize(2);
5846 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
5847 }
5848 case NEON::BI__builtin_neon_vqshl_n_v:
5849 case NEON::BI__builtin_neon_vqshlq_n_v:
5850 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
5851 1, false);
5852 case NEON::BI__builtin_neon_vqshlu_n_v:
5853 case NEON::BI__builtin_neon_vqshluq_n_v:
5854 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
5855 1, false);
5856 case NEON::BI__builtin_neon_vrecpe_v:
5857 case NEON::BI__builtin_neon_vrecpeq_v:
5858 case NEON::BI__builtin_neon_vrsqrte_v:
5859 case NEON::BI__builtin_neon_vrsqrteq_v:
5860 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
5861 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
5862 case NEON::BI__builtin_neon_vrndi_v:
5863 case NEON::BI__builtin_neon_vrndiq_v:
5864 Int = Intrinsic::nearbyint;
5865 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
5866 case NEON::BI__builtin_neon_vrshr_n_v:
5867 case NEON::BI__builtin_neon_vrshrq_n_v:
5868 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
5869 1, true);
5870 case NEON::BI__builtin_neon_vshl_n_v:
5871 case NEON::BI__builtin_neon_vshlq_n_v:
5872 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
    return Builder.CreateShl(Builder.CreateBitCast(Ops[0], Ty), Ops[1],
5874 "vshl_n");
5875 case NEON::BI__builtin_neon_vshll_n_v: {
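    // vshll_n widens then shifts, e.g. <8 x i8> is extended to <8 x i16>
    // before the left shift.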
5876 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
5877 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5878 if (Usgn)
5879 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
5880 else
5881 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
5882 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
5883 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
5884 }
5885 case NEON::BI__builtin_neon_vshrn_n_v: {
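    // vshrn_n shifts the wide source right and narrows the result, e.g.
    // <4 x i32> is shifted and then truncated to <4 x i16>.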
5886 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
5887 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5888 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
5889 if (Usgn)
5890 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
5891 else
5892 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
5893 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
5894 }
5895 case NEON::BI__builtin_neon_vshr_n_v:
5896 case NEON::BI__builtin_neon_vshrq_n_v:
5897 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
5898 case NEON::BI__builtin_neon_vst1_v:
5899 case NEON::BI__builtin_neon_vst1q_v:
5900 case NEON::BI__builtin_neon_vst2_v:
5901 case NEON::BI__builtin_neon_vst2q_v:
5902 case NEON::BI__builtin_neon_vst3_v:
5903 case NEON::BI__builtin_neon_vst3q_v:
5904 case NEON::BI__builtin_neon_vst4_v:
5905 case NEON::BI__builtin_neon_vst4q_v:
5906 case NEON::BI__builtin_neon_vst2_lane_v:
5907 case NEON::BI__builtin_neon_vst2q_lane_v:
5908 case NEON::BI__builtin_neon_vst3_lane_v:
5909 case NEON::BI__builtin_neon_vst3q_lane_v:
5910 case NEON::BI__builtin_neon_vst4_lane_v:
5911 case NEON::BI__builtin_neon_vst4q_lane_v: {
5912 llvm::Type *Tys[] = {Int8PtrTy, Ty};
5913 Ops.push_back(getAlignmentValue32(PtrOp0));
5914 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
5915 }
5916 case NEON::BI__builtin_neon_vst1_x2_v:
5917 case NEON::BI__builtin_neon_vst1q_x2_v:
5918 case NEON::BI__builtin_neon_vst1_x3_v:
5919 case NEON::BI__builtin_neon_vst1q_x3_v:
5920 case NEON::BI__builtin_neon_vst1_x4_v:
5921 case NEON::BI__builtin_neon_vst1q_x4_v: {
5922 llvm::Type *PTy = CGM.getPointerInDefaultAS(VTy->getVectorElementType());
    // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
    // in AArch64 it comes last. We may want to stick to one or the other.
5925 if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be) {
5926 llvm::Type *Tys[2] = { VTy, PTy };
5927 std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
5928 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
5929 }
5930 llvm::Type *Tys[2] = { PTy, VTy };
5931 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
5932 }
5933 case NEON::BI__builtin_neon_vsubhn_v: {
5934 llvm::VectorType *SrcTy =
5935 llvm::VectorType::getExtendedElementVectorType(VTy);
5936
    // %diff = sub <4 x i32> %lhs, %rhs
5938 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
5939 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
5940 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
5941
    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
5943 Constant *ShiftAmt =
5944 ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
5945 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
5946
5947 // %res = trunc <4 x i32> %high to <4 x i16>
5948 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
5949 }
5950 case NEON::BI__builtin_neon_vtrn_v:
5951 case NEON::BI__builtin_neon_vtrnq_v: {
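    // vtrn transposes pairs of elements, e.g. vtrn(<a0,a1,a2,a3>,
    // <b0,b1,b2,b3>) stores <a0,b0,a2,b2> and <a1,b1,a3,b3>.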
5952 Ops[0] = Builder.CreateBitCast(Ops[0], CGM.getPointerInDefaultAS(Ty));
5953 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5954 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5955 Value *SV = nullptr;
5956
5957 for (unsigned vi = 0; vi != 2; ++vi) {
5958 SmallVector<uint32_t, 16> Indices;
5959 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5960 Indices.push_back(i+vi);
5961 Indices.push_back(i+e+vi);
5962 }
5963 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5964 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
5965 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5966 }
5967 return SV;
5968 }
5969 case NEON::BI__builtin_neon_vtst_v:
5970 case NEON::BI__builtin_neon_vtstq_v: {
5971 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5972 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5973 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
5974 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
5975 ConstantAggregateZero::get(Ty));
5976 return Builder.CreateSExt(Ops[0], Ty, "vtst");
5977 }
5978 case NEON::BI__builtin_neon_vuzp_v:
5979 case NEON::BI__builtin_neon_vuzpq_v: {
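    // vuzp de-interleaves, e.g. vuzp(<a0,a1,a2,a3>, <b0,b1,b2,b3>) stores
    // <a0,a2,b0,b2> (even lanes) and <a1,a3,b1,b3> (odd lanes).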
5980 Ops[0] = Builder.CreateBitCast(Ops[0], CGM.getPointerInDefaultAS(Ty));
5981 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5982 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5983 Value *SV = nullptr;
5984
5985 for (unsigned vi = 0; vi != 2; ++vi) {
5986 SmallVector<uint32_t, 16> Indices;
5987 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5988 Indices.push_back(2*i+vi);
5989
5990 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
5991 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
5992 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
5993 }
5994 return SV;
5995 }
5996 case NEON::BI__builtin_neon_vzip_v:
5997 case NEON::BI__builtin_neon_vzipq_v: {
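    // vzip interleaves, e.g. vzip(<a0,a1,a2,a3>, <b0,b1,b2,b3>) stores
    // <a0,b0,a1,b1> and <a2,b2,a3,b3>.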
5998 Ops[0] = Builder.CreateBitCast(Ops[0], CGM.getPointerInDefaultAS(Ty));
5999 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6000 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6001 Value *SV = nullptr;
6002
6003 for (unsigned vi = 0; vi != 2; ++vi) {
6004 SmallVector<uint32_t, 16> Indices;
6005 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
6006 Indices.push_back((i + vi*e) >> 1);
6007 Indices.push_back(((i + vi*e) >> 1)+e);
6008 }
6009 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
6010 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
6011 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
6012 }
6013 return SV;
6014 }
6015 case NEON::BI__builtin_neon_vdot_v:
6016 case NEON::BI__builtin_neon_vdotq_v: {
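    // vdot accumulates 4-element dot products of the i8 inputs into each i32
    // lane, e.g. <8 x i8> inputs produce a <2 x i32> result.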
6017 llvm::Type *InputTy =
6018 llvm::VectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
6019 llvm::Type *Tys[2] = { Ty, InputTy };
6020 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6021 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
6022 }
6023 case NEON::BI__builtin_neon_vfmlal_low_v:
6024 case NEON::BI__builtin_neon_vfmlalq_low_v: {
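    // The fmlal/fmlsl intrinsics multiply the selected (low or high) f16
    // halves of the operands and accumulate into f32 lanes.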
6025 llvm::Type *InputTy =
6026 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6027 llvm::Type *Tys[2] = { Ty, InputTy };
6028 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
6029 }
6030 case NEON::BI__builtin_neon_vfmlsl_low_v:
6031 case NEON::BI__builtin_neon_vfmlslq_low_v: {
6032 llvm::Type *InputTy =
6033 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6034 llvm::Type *Tys[2] = { Ty, InputTy };
6035 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
6036 }
6037 case NEON::BI__builtin_neon_vfmlal_high_v:
6038 case NEON::BI__builtin_neon_vfmlalq_high_v: {
6039 llvm::Type *InputTy =
6040 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6041 llvm::Type *Tys[2] = { Ty, InputTy };
6042 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
6043 }
6044 case NEON::BI__builtin_neon_vfmlsl_high_v:
6045 case NEON::BI__builtin_neon_vfmlslq_high_v: {
6046 llvm::Type *InputTy =
6047 llvm::VectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
6048 llvm::Type *Tys[2] = { Ty, InputTy };
6049 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
6050 }
6051 }
6052
6053 assert(Int && "Expected valid intrinsic number");
6054
  // Determine the type(s) of this overloaded NEON intrinsic.
6056 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
6057
6058 Value *Result = EmitNeonCall(F, Ops, NameHint);
6059 llvm::Type *ResultType = ConvertType(E->getType());
  // Cast the one-element vector result of an AArch64 intrinsic back to the
  // scalar type expected by the builtin.
6062 return Builder.CreateBitCast(Result, ResultType, NameHint);
6063}
6064
6065Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
6066 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
6067 const CmpInst::Predicate Ip, const Twine &Name) {
6068 llvm::Type *OTy = Op->getType();
6069
6070 // FIXME: this is utterly horrific. We should not be looking at previous
6071 // codegen context to find out what needs doing. Unfortunately TableGen
6072 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
6073 // (etc).
6074 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
6075 OTy = BI->getOperand(0)->getType();
6076
6077 Op = Builder.CreateBitCast(Op, OTy);
6078 if (OTy->getScalarType()->isFloatingPointTy()) {
6079 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
6080 } else {
6081 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
6082 }
6083 return Builder.CreateSExt(Op, Ty, Name);
6084}
6085
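// Pack a list of 64-bit table operands into 128-bit registers for the
// AArch64 TBL/TBX intrinsics: consecutive d-register pairs are shuffled
// together into q-registers, and a trailing odd table is zero-padded.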
6086static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
6087 Value *ExtOp, Value *IndexOp,
6088 llvm::Type *ResTy, unsigned IntID,
6089 const char *Name) {
6090 SmallVector<Value *, 2> TblOps;
6091 if (ExtOp)
6092 TblOps.push_back(ExtOp);
6093
  // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
6095 SmallVector<uint32_t, 16> Indices;
6096 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
6097 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
6098 Indices.push_back(2*i);
6099 Indices.push_back(2*i+1);
6100 }
6101
6102 int PairPos = 0, End = Ops.size() - 1;
6103 while (PairPos < End) {
6104 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6105 Ops[PairPos+1], Indices,
6106 Name));
6107 PairPos += 2;
6108 }
6109
  // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
  // of the last 128-bit lookup table with zero.
6112 if (PairPos == End) {
6113 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
6114 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
6115 ZeroTbl, Indices, Name));
6116 }
6117
6118 Function *TblF;
6119 TblOps.push_back(IndexOp);
6120 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
6121
6122 return CGF.EmitNeonCall(TblF, TblOps, Name);
6123}
6124
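// Map the ARM hint builtins (nop, yield, wfe, wfi, sev, sevl) and their
// MSVC-style aliases to llvm.arm.hint with the matching immediate value 0-5.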
6125Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
6126 unsigned Value;
6127 switch (BuiltinID) {
6128 default:
6129 return nullptr;
6130 case ARM::BI__builtin_arm_nop:
6131 Value = 0;
6132 break;
6133 case ARM::BI__builtin_arm_yield:
6134 case ARM::BI__yield:
6135 Value = 1;
6136 break;
6137 case ARM::BI__builtin_arm_wfe:
6138 case ARM::BI__wfe:
6139 Value = 2;
6140 break;
6141 case ARM::BI__builtin_arm_wfi:
6142 case ARM::BI__wfi:
6143 Value = 3;
6144 break;
6145 case ARM::BI__builtin_arm_sev:
6146 case ARM::BI__sev:
6147 Value = 4;
6148 break;
6149 case ARM::BI__builtin_arm_sevl:
6150 case ARM::BI__sevl:
6151 Value = 5;
6152 break;
6153 }
6154
6155 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
6156 llvm::ConstantInt::get(Int32Ty, Value));
6157}
6158
// Generates the IR for the read/write special register builtin.
// ValueType is the type of the value that is to be written or read, and
// RegisterType is the type of the register being written to or read from.
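// For example, __builtin_arm_rsr("REG") is lowered to a call to
// llvm.read_register.i32 carrying metadata (!{!"REG"}) that names the
// register.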
6162static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
6163 const CallExpr *E,
6164 llvm::Type *RegisterType,
6165 llvm::Type *ValueType,
6166 bool IsRead,
6167 StringRef SysReg = "") {
  // The read and write register intrinsics only support 32-bit and 64-bit
  // operations.
6169 assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
6170 && "Unsupported size for register.");
6171
6172 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6173 CodeGen::CodeGenModule &CGM = CGF.CGM;
6174 LLVMContext &Context = CGM.getLLVMContext();
6175
6176 if (SysReg.empty()) {
6177 const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
6178 SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
6179 }
6180
6181 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
6182 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
6183 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
6184
6185 llvm::Type *Types[] = { RegisterType };
6186
6187 bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
6188 assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
6189 && "Can't fit 64-bit value in 32-bit register");
6190
6191 if (IsRead) {
6192 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
6193 llvm::Value *Call = Builder.CreateCall(F, Metadata);
6194
6195 if (MixedTypes)
6196 // Read into 64 bit register and then truncate result to 32 bit.
6197 return Builder.CreateTrunc(Call, ValueType);
6198
6199 if (ValueType->isPointerTy())
6200 // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
6201 return Builder.CreateIntToPtr(Call, ValueType);
6202
6203 return Call;
6204 }
6205
6206 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
6207 llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
6208 if (MixedTypes) {
6209 // Extend 32 bit write value to 64 bit to pass to write.
6210 ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
6211 return Builder.CreateCall(F, { Metadata, ArgValue });
6212 }
6213
6214 if (ValueType->isPointerTy()) {
    // Have VoidPtrTy ArgValue but need to pass an i32/i64 to the intrinsic.
6216 ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
6217 return Builder.CreateCall(F, { Metadata, ArgValue });
6218 }
6219
6220 return Builder.CreateCall(F, { Metadata, ArgValue });
6221}
6222
6223/// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
6224/// argument that specifies the vector type.
6225static bool HasExtraNeonArgument(unsigned BuiltinID) {
6226 switch (BuiltinID) {
6227 default: break;
6228 case NEON::BI__builtin_neon_vget_lane_i8:
6229 case NEON::BI__builtin_neon_vget_lane_i16:
6230 case NEON::BI__builtin_neon_vget_lane_i32:
6231 case NEON::BI__builtin_neon_vget_lane_i64:
6232 case NEON::BI__builtin_neon_vget_lane_f32:
6233 case NEON::BI__builtin_neon_vgetq_lane_i8:
6234 case NEON::BI__builtin_neon_vgetq_lane_i16:
6235 case NEON::BI__builtin_neon_vgetq_lane_i32:
6236 case NEON::BI__builtin_neon_vgetq_lane_i64:
6237 case NEON::BI__builtin_neon_vgetq_lane_f32:
6238 case NEON::BI__builtin_neon_vset_lane_i8:
6239 case NEON::BI__builtin_neon_vset_lane_i16:
6240 case NEON::BI__builtin_neon_vset_lane_i32:
6241 case NEON::BI__builtin_neon_vset_lane_i64:
6242 case NEON::BI__builtin_neon_vset_lane_f32:
6243 case NEON::BI__builtin_neon_vsetq_lane_i8:
6244 case NEON::BI__builtin_neon_vsetq_lane_i16:
6245 case NEON::BI__builtin_neon_vsetq_lane_i32:
6246 case NEON::BI__builtin_neon_vsetq_lane_i64:
6247 case NEON::BI__builtin_neon_vsetq_lane_f32:
6248 case NEON::BI__builtin_neon_vsha1h_u32:
6249 case NEON::BI__builtin_neon_vsha1cq_u32:
6250 case NEON::BI__builtin_neon_vsha1pq_u32:
6251 case NEON::BI__builtin_neon_vsha1mq_u32:
6252 case clang::ARM::BI_MoveToCoprocessor:
6253 case clang::ARM::BI_MoveToCoprocessor2:
6254 return false;
6255 }
6256 return true;
6257}
6258
6259Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
6260 const CallExpr *E,
6261 llvm::Triple::ArchType Arch) {
6262 if (auto Hint = GetValueForARMHint(BuiltinID))
6263 return Hint;
6264 unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
6265
6266 if (BuiltinID == ARM::BI__emit) {
6267 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
6268 llvm::FunctionType *FTy =
6269 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
6270
6271 Expr::EvalResult Result;
6272 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
6273 llvm_unreachable("Sema will ensure that the parameter is constant");
6274
6275 llvm::APSInt Value = Result.Val.getInt();
6276 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
6277
6278 llvm::InlineAsm *Emit =
6279 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
6280 /*SideEffects=*/true)
6281 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
6282 /*SideEffects=*/true);
6283
6284 return Builder.CreateCall(Emit);
6285 }
6286
6287 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
6288 Value *Option = EmitScalarExpr(E->getArg(0));
6289 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
6290 }
6291
6292 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
6293 Value *Address = EmitScalarExpr(E->getArg(0));
6294 Value *RW = EmitScalarExpr(E->getArg(1));
6295 Value *IsData = EmitScalarExpr(E->getArg(2));
6296
    // Locality is not supported on the ARM target, so use the maximum
    // value 3.
6298 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
6299
6300 Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
6301 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
6302 }
6303
6304 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
6305 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
6306 return Builder.CreateCall(
6307 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
6308 }
6309
6310 if (BuiltinID == ARM::BI__clear_cache) {
6311 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
6312 const FunctionDecl *FD = E->getDirectCallee();
6313 Value *Ops[2];
6314 for (unsigned i = 0; i < 2; i++)
6315 Ops[i] = EmitScalarExpr(E->getArg(i));
6316 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
6317 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
6318 StringRef Name = FD->getName();
6319 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
6320 }
6321
6322 if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
6323 BuiltinID == ARM::BI__builtin_arm_mcrr2) {
6324 Function *F;
6325
6326 switch (BuiltinID) {
6327 default: llvm_unreachable("unexpected builtin");
6328 case ARM::BI__builtin_arm_mcrr:
6329 F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
6330 break;
6331 case ARM::BI__builtin_arm_mcrr2:
6332 F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
6333 break;
6334 }
6335
    // The MCRR{2} instruction has 5 operands, but the intrinsic has only 4:
    // Rt and Rt2 are passed to the intrinsic as a single unsigned 64-bit
    // integer, even though the instruction treats them as two separate 32-bit
    // integers.
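    // e.g. RtAndRt2 == 0xAABBCCDD11223344 splits into Rt == 0x11223344 and
    // Rt2 == 0xAABBCCDD.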
6342
6343 Value *Coproc = EmitScalarExpr(E->getArg(0));
6344 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6345 Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
6346 Value *CRm = EmitScalarExpr(E->getArg(3));
6347
6348 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6349 Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
6350 Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
6351 Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
6352
6353 return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
6354 }
6355
6356 if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
6357 BuiltinID == ARM::BI__builtin_arm_mrrc2) {
6358 Function *F;
6359
6360 switch (BuiltinID) {
6361 default: llvm_unreachable("unexpected builtin");
6362 case ARM::BI__builtin_arm_mrrc:
6363 F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
6364 break;
6365 case ARM::BI__builtin_arm_mrrc2:
6366 F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
6367 break;
6368 }
6369
6370 Value *Coproc = EmitScalarExpr(E->getArg(0));
6371 Value *Opc1 = EmitScalarExpr(E->getArg(1));
6372 Value *CRm = EmitScalarExpr(E->getArg(2));
6373 Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
6374
    // The intrinsic returns an unsigned 64-bit value represented as two
    // 32-bit integers, which we reassemble below.
6377
6378 Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
6379 Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
6380 Rt = Builder.CreateZExt(Rt, Int64Ty);
6381 Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
6382
6383 Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
6384 RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
6385 RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
6386
6387 return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
6388 }
6389
6390 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
6391 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
6392 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
6393 getContext().getTypeSize(E->getType()) == 64) ||
6394 BuiltinID == ARM::BI__ldrexd) {
6395 Function *F;
6396
6397 switch (BuiltinID) {
6398 default: llvm_unreachable("unexpected builtin");
6399 case ARM::BI__builtin_arm_ldaex:
6400 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
6401 break;
6402 case ARM::BI__builtin_arm_ldrexd:
6403 case ARM::BI__builtin_arm_ldrex:
6404 case ARM::BI__ldrexd:
6405 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
6406 break;
6407 }
6408
6409 Value *LdPtr = EmitScalarExpr(E->getArg(0));
6410 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
6411 "ldrexd");
6412
6413 Value *Val0 = Builder.CreateExtractValue(Val, 1);
6414 Value *Val1 = Builder.CreateExtractValue(Val, 0);
6415 Val0 = Builder.CreateZExt(Val0, Int64Ty);
6416 Val1 = Builder.CreateZExt(Val1, Int64Ty);
6417
6418 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
6419 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
6420 Val = Builder.CreateOr(Val, Val1);
6421 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
6422 }
6423
6424 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
6425 BuiltinID == ARM::BI__builtin_arm_ldaex) {
6426 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
6427
6428 QualType Ty = E->getType();
6429 llvm::Type *RealResTy = ConvertType(Ty);
6430 llvm::Type *PtrTy = llvm::IntegerType::get(
6431 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(DefaultAS);
6432 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
6433
6434 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
6435 ? Intrinsic::arm_ldaex
6436 : Intrinsic::arm_ldrex,
6437 PtrTy);
6438 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
6439
6440 if (RealResTy->isPointerTy())
6441 return Builder.CreateIntToPtr(Val, RealResTy);
6442 else {
6443 llvm::Type *IntResTy = llvm::IntegerType::get(
6444 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
6445 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
6446 return Builder.CreateBitCast(Val, RealResTy);
6447 }
6448 }
6449
6450 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
6451 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
6452 BuiltinID == ARM::BI__builtin_arm_strex) &&
6453 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
6454 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6455 ? Intrinsic::arm_stlexd
6456 : Intrinsic::arm_strexd);
6457 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
6458
6459 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
6460 Value *Val = EmitScalarExpr(E->getArg(0));
6461 Builder.CreateStore(Val, Tmp);
6462
6463 Address LdPtr =
6464 Builder.CreateBitCast(Tmp, llvm::PointerType::get(STy, DefaultAS));
6465 Val = Builder.CreateLoad(LdPtr);
6466
6467 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
6468 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
6469 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
6470 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
6471 }
6472
6473 if (BuiltinID == ARM::BI__builtin_arm_strex ||
6474 BuiltinID == ARM::BI__builtin_arm_stlex) {
6475 Value *StoreVal = EmitScalarExpr(E->getArg(0));
6476 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
6477
6478 QualType Ty = E->getArg(0)->getType();
6479 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
6480 getContext().getTypeSize(Ty));
6481 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo(DefaultAS));
6482
6483 if (StoreVal->getType()->isPointerTy())
6484 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
6485 else {
6486 llvm::Type *IntTy = llvm::IntegerType::get(
6487 getLLVMContext(),
6488 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
6489 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
6490 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
6491 }
6492
6493 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
6494 ? Intrinsic::arm_stlex
6495 : Intrinsic::arm_strex,
6496 StoreAddr->getType());
6497 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
6498 }
6499
6500 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
6501 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
6502 return Builder.CreateCall(F);
6503 }
6504
6505 // CRC32
6506 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
6507 switch (BuiltinID) {
6508 case ARM::BI__builtin_arm_crc32b:
6509 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
6510 case ARM::BI__builtin_arm_crc32cb:
6511 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
6512 case ARM::BI__builtin_arm_crc32h:
6513 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
6514 case ARM::BI__builtin_arm_crc32ch:
6515 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
6516 case ARM::BI__builtin_arm_crc32w:
6517 case ARM::BI__builtin_arm_crc32d:
6518 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
6519 case ARM::BI__builtin_arm_crc32cw:
6520 case ARM::BI__builtin_arm_crc32cd:
6521 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
6522 }
6523
6524 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
6525 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6526 Value *Arg1 = EmitScalarExpr(E->getArg(1));
6527
    // The crc32{c,}d intrinsics are implemented as two calls to the
    // crc32{c,}w intrinsics, hence we need different codegen for these cases.
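    // e.g. crc32d(a, b) is emitted as crc32w(crc32w(a, lo32(b)), hi32(b)).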
6530 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
6531 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
6532 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
6533 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
6534 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
6535 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
6536
6537 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6538 Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
6539 return Builder.CreateCall(F, {Res, Arg1b});
6540 } else {
6541 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
6542
6543 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
6544 return Builder.CreateCall(F, {Arg0, Arg1});
6545 }
6546 }
6547
6548 if (BuiltinID == ARM::BI__builtin_arm_rsr ||
6549 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6550 BuiltinID == ARM::BI__builtin_arm_rsrp ||
6551 BuiltinID == ARM::BI__builtin_arm_wsr ||
6552 BuiltinID == ARM::BI__builtin_arm_wsr64 ||
6553 BuiltinID == ARM::BI__builtin_arm_wsrp) {
6554
6555 bool IsRead = BuiltinID == ARM::BI__builtin_arm_rsr ||
6556 BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6557 BuiltinID == ARM::BI__builtin_arm_rsrp;
6558
6559 bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
6560 BuiltinID == ARM::BI__builtin_arm_wsrp;
6561
6562 bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
6563 BuiltinID == ARM::BI__builtin_arm_wsr64;
6564
6565 llvm::Type *ValueType;
6566 llvm::Type *RegisterType;
6567 if (IsPointerBuiltin) {
6568 ValueType = VoidPtrTy;
6569 RegisterType = Int32Ty;
6570 } else if (Is64Bit) {
6571 ValueType = RegisterType = Int64Ty;
6572 } else {
6573 ValueType = RegisterType = Int32Ty;
6574 }
6575
6576 return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType, IsRead);
6577 }
6578
6579 // Find out if any arguments are required to be integer constant
6580 // expressions.
6581 unsigned ICEArguments = 0;
6582 ASTContext::GetBuiltinTypeError Error;
6583 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6584 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6585
6586 auto getAlignmentValue32 = [&](Address addr) -> Value* {
6587 return Builder.getInt32(addr.getAlignment().getQuantity());
6588 };
6589
6590 Address PtrOp0 = Address::invalid();
6591 Address PtrOp1 = Address::invalid();
6592 SmallVector<Value*, 4> Ops;
6593 bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
6594 unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
6595 for (unsigned i = 0, e = NumArgs; i != e; i++) {
6596 if (i == 0) {
6597 switch (BuiltinID) {
6598 case NEON::BI__builtin_neon_vld1_v:
6599 case NEON::BI__builtin_neon_vld1q_v:
6600 case NEON::BI__builtin_neon_vld1q_lane_v:
6601 case NEON::BI__builtin_neon_vld1_lane_v:
6602 case NEON::BI__builtin_neon_vld1_dup_v:
6603 case NEON::BI__builtin_neon_vld1q_dup_v:
6604 case NEON::BI__builtin_neon_vst1_v:
6605 case NEON::BI__builtin_neon_vst1q_v:
6606 case NEON::BI__builtin_neon_vst1q_lane_v:
6607 case NEON::BI__builtin_neon_vst1_lane_v:
6608 case NEON::BI__builtin_neon_vst2_v:
6609 case NEON::BI__builtin_neon_vst2q_v:
6610 case NEON::BI__builtin_neon_vst2_lane_v:
6611 case NEON::BI__builtin_neon_vst2q_lane_v:
6612 case NEON::BI__builtin_neon_vst3_v:
6613 case NEON::BI__builtin_neon_vst3q_v:
6614 case NEON::BI__builtin_neon_vst3_lane_v:
6615 case NEON::BI__builtin_neon_vst3q_lane_v:
6616 case NEON::BI__builtin_neon_vst4_v:
6617 case NEON::BI__builtin_neon_vst4q_v:
6618 case NEON::BI__builtin_neon_vst4_lane_v:
6619 case NEON::BI__builtin_neon_vst4q_lane_v:
6620 // Get the alignment for the argument in addition to the value;
6621 // we'll use it later.
6622 PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
6623 Ops.push_back(PtrOp0.getPointer());
6624 continue;
6625 }
6626 }
6627 if (i == 1) {
6628 switch (BuiltinID) {
6629 case NEON::BI__builtin_neon_vld2_v:
6630 case NEON::BI__builtin_neon_vld2q_v:
6631 case NEON::BI__builtin_neon_vld3_v:
6632 case NEON::BI__builtin_neon_vld3q_v:
6633 case NEON::BI__builtin_neon_vld4_v:
6634 case NEON::BI__builtin_neon_vld4q_v:
6635 case NEON::BI__builtin_neon_vld2_lane_v:
6636 case NEON::BI__builtin_neon_vld2q_lane_v:
6637 case NEON::BI__builtin_neon_vld3_lane_v:
6638 case NEON::BI__builtin_neon_vld3q_lane_v:
6639 case NEON::BI__builtin_neon_vld4_lane_v:
6640 case NEON::BI__builtin_neon_vld4q_lane_v:
6641 case NEON::BI__builtin_neon_vld2_dup_v:
6642 case NEON::BI__builtin_neon_vld2q_dup_v:
6643 case NEON::BI__builtin_neon_vld3_dup_v:
6644 case NEON::BI__builtin_neon_vld3q_dup_v:
6645 case NEON::BI__builtin_neon_vld4_dup_v:
6646 case NEON::BI__builtin_neon_vld4q_dup_v:
6647 // Get the alignment for the argument in addition to the value;
6648 // we'll use it later.
6649 PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
6650 Ops.push_back(PtrOp1.getPointer());
6651 continue;
6652 }
6653 }
6654
6655 if ((ICEArguments & (1 << i)) == 0) {
6656 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6657 } else {
6658 // If this is required to be a constant, constant fold it so that we know
6659 // that the generated intrinsic gets a ConstantInt.
6660 llvm::APSInt Result;
6661 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
6662 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
6663 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
6664 }
6665 }
6666
6667 switch (BuiltinID) {
6668 default: break;
6669
6670 case NEON::BI__builtin_neon_vget_lane_i8:
6671 case NEON::BI__builtin_neon_vget_lane_i16:
6672 case NEON::BI__builtin_neon_vget_lane_i32:
6673 case NEON::BI__builtin_neon_vget_lane_i64:
6674 case NEON::BI__builtin_neon_vget_lane_f32:
6675 case NEON::BI__builtin_neon_vgetq_lane_i8:
6676 case NEON::BI__builtin_neon_vgetq_lane_i16:
6677 case NEON::BI__builtin_neon_vgetq_lane_i32:
6678 case NEON::BI__builtin_neon_vgetq_lane_i64:
6679 case NEON::BI__builtin_neon_vgetq_lane_f32:
6680 return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
6681
6682 case NEON::BI__builtin_neon_vrndns_f32: {
6683 Value *Arg = EmitScalarExpr(E->getArg(0));
6684 llvm::Type *Tys[] = {Arg->getType()};
6685 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
6686 return Builder.CreateCall(F, {Arg}, "vrndn"); }
6687
6688 case NEON::BI__builtin_neon_vset_lane_i8:
6689 case NEON::BI__builtin_neon_vset_lane_i16:
6690 case NEON::BI__builtin_neon_vset_lane_i32:
6691 case NEON::BI__builtin_neon_vset_lane_i64:
6692 case NEON::BI__builtin_neon_vset_lane_f32:
6693 case NEON::BI__builtin_neon_vsetq_lane_i8:
6694 case NEON::BI__builtin_neon_vsetq_lane_i16:
6695 case NEON::BI__builtin_neon_vsetq_lane_i32:
6696 case NEON::BI__builtin_neon_vsetq_lane_i64:
6697 case NEON::BI__builtin_neon_vsetq_lane_f32:
6698 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
6699
6700 case NEON::BI__builtin_neon_vsha1h_u32:
6701 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
6702 "vsha1h");
  case NEON::BI__builtin_neon_vsha1cq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
                        "vsha1c");
  case NEON::BI__builtin_neon_vsha1pq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
                        "vsha1p");
  case NEON::BI__builtin_neon_vsha1mq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
                        "vsha1m");
6712
6713 // The ARM _MoveToCoprocessor builtins put the input register value as
6714 // the first argument, but the LLVM intrinsic expects it as the third one.
6715 case ARM::BI_MoveToCoprocessor:
6716 case ARM::BI_MoveToCoprocessor2: {
6717 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
6718 Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
6719 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
6720 Ops[3], Ops[4], Ops[5]});
6721 }
6722 case ARM::BI_BitScanForward:
6723 case ARM::BI_BitScanForward64:
6724 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
6725 case ARM::BI_BitScanReverse:
6726 case ARM::BI_BitScanReverse64:
6727 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
6728
6729 case ARM::BI_InterlockedAnd64:
6730 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
6731 case ARM::BI_InterlockedExchange64:
6732 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
6733 case ARM::BI_InterlockedExchangeAdd64:
6734 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
6735 case ARM::BI_InterlockedExchangeSub64:
6736 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
6737 case ARM::BI_InterlockedOr64:
6738 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
6739 case ARM::BI_InterlockedXor64:
6740 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
6741 case ARM::BI_InterlockedDecrement64:
6742 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
6743 case ARM::BI_InterlockedIncrement64:
6744 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
6745 case ARM::BI_InterlockedExchangeAdd8_acq:
6746 case ARM::BI_InterlockedExchangeAdd16_acq:
6747 case ARM::BI_InterlockedExchangeAdd_acq:
6748 case ARM::BI_InterlockedExchangeAdd64_acq:
6749 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
6750 case ARM::BI_InterlockedExchangeAdd8_rel:
6751 case ARM::BI_InterlockedExchangeAdd16_rel:
6752 case ARM::BI_InterlockedExchangeAdd_rel:
6753 case ARM::BI_InterlockedExchangeAdd64_rel:
6754 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
6755 case ARM::BI_InterlockedExchangeAdd8_nf:
6756 case ARM::BI_InterlockedExchangeAdd16_nf:
6757 case ARM::BI_InterlockedExchangeAdd_nf:
6758 case ARM::BI_InterlockedExchangeAdd64_nf:
6759 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
6760 case ARM::BI_InterlockedExchange8_acq:
6761 case ARM::BI_InterlockedExchange16_acq:
6762 case ARM::BI_InterlockedExchange_acq:
6763 case ARM::BI_InterlockedExchange64_acq:
6764 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
6765 case ARM::BI_InterlockedExchange8_rel:
6766 case ARM::BI_InterlockedExchange16_rel:
6767 case ARM::BI_InterlockedExchange_rel:
6768 case ARM::BI_InterlockedExchange64_rel:
6769 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
6770 case ARM::BI_InterlockedExchange8_nf:
6771 case ARM::BI_InterlockedExchange16_nf:
6772 case ARM::BI_InterlockedExchange_nf:
6773 case ARM::BI_InterlockedExchange64_nf:
6774 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
6775 case ARM::BI_InterlockedCompareExchange8_acq:
6776 case ARM::BI_InterlockedCompareExchange16_acq:
6777 case ARM::BI_InterlockedCompareExchange_acq:
6778 case ARM::BI_InterlockedCompareExchange64_acq:
6779 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
6780 case ARM::BI_InterlockedCompareExchange8_rel:
6781 case ARM::BI_InterlockedCompareExchange16_rel:
6782 case ARM::BI_InterlockedCompareExchange_rel:
6783 case ARM::BI_InterlockedCompareExchange64_rel:
6784 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
6785 case ARM::BI_InterlockedCompareExchange8_nf:
6786 case ARM::BI_InterlockedCompareExchange16_nf:
6787 case ARM::BI_InterlockedCompareExchange_nf:
6788 case ARM::BI_InterlockedCompareExchange64_nf:
6789 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
6790 case ARM::BI_InterlockedOr8_acq:
6791 case ARM::BI_InterlockedOr16_acq:
6792 case ARM::BI_InterlockedOr_acq:
6793 case ARM::BI_InterlockedOr64_acq:
6794 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
6795 case ARM::BI_InterlockedOr8_rel:
6796 case ARM::BI_InterlockedOr16_rel:
6797 case ARM::BI_InterlockedOr_rel:
6798 case ARM::BI_InterlockedOr64_rel:
6799 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
6800 case ARM::BI_InterlockedOr8_nf:
6801 case ARM::BI_InterlockedOr16_nf:
6802 case ARM::BI_InterlockedOr_nf:
6803 case ARM::BI_InterlockedOr64_nf:
6804 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
6805 case ARM::BI_InterlockedXor8_acq:
6806 case ARM::BI_InterlockedXor16_acq:
6807 case ARM::BI_InterlockedXor_acq:
6808 case ARM::BI_InterlockedXor64_acq:
6809 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
6810 case ARM::BI_InterlockedXor8_rel:
6811 case ARM::BI_InterlockedXor16_rel:
6812 case ARM::BI_InterlockedXor_rel:
6813 case ARM::BI_InterlockedXor64_rel:
6814 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
6815 case ARM::BI_InterlockedXor8_nf:
6816 case ARM::BI_InterlockedXor16_nf:
6817 case ARM::BI_InterlockedXor_nf:
6818 case ARM::BI_InterlockedXor64_nf:
6819 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
6820 case ARM::BI_InterlockedAnd8_acq:
6821 case ARM::BI_InterlockedAnd16_acq:
6822 case ARM::BI_InterlockedAnd_acq:
6823 case ARM::BI_InterlockedAnd64_acq:
6824 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
6825 case ARM::BI_InterlockedAnd8_rel:
6826 case ARM::BI_InterlockedAnd16_rel:
6827 case ARM::BI_InterlockedAnd_rel:
6828 case ARM::BI_InterlockedAnd64_rel:
6829 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
6830 case ARM::BI_InterlockedAnd8_nf:
6831 case ARM::BI_InterlockedAnd16_nf:
6832 case ARM::BI_InterlockedAnd_nf:
6833 case ARM::BI_InterlockedAnd64_nf:
6834 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
6835 case ARM::BI_InterlockedIncrement16_acq:
6836 case ARM::BI_InterlockedIncrement_acq:
6837 case ARM::BI_InterlockedIncrement64_acq:
6838 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
6839 case ARM::BI_InterlockedIncrement16_rel:
6840 case ARM::BI_InterlockedIncrement_rel:
6841 case ARM::BI_InterlockedIncrement64_rel:
6842 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
6843 case ARM::BI_InterlockedIncrement16_nf:
6844 case ARM::BI_InterlockedIncrement_nf:
6845 case ARM::BI_InterlockedIncrement64_nf:
6846 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
6847 case ARM::BI_InterlockedDecrement16_acq:
6848 case ARM::BI_InterlockedDecrement_acq:
6849 case ARM::BI_InterlockedDecrement64_acq:
6850 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
6851 case ARM::BI_InterlockedDecrement16_rel:
6852 case ARM::BI_InterlockedDecrement_rel:
6853 case ARM::BI_InterlockedDecrement64_rel:
6854 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
6855 case ARM::BI_InterlockedDecrement16_nf:
6856 case ARM::BI_InterlockedDecrement_nf:
6857 case ARM::BI_InterlockedDecrement64_nf:
6858 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
6859 }
6860
6861 // Get the last argument, which specifies the vector type.
6862 assert(HasExtraArg);
6863 llvm::APSInt Result;
6864 const Expr *Arg = E->getArg(E->getNumArgs()-1);
6865 if (!Arg->isIntegerConstantExpr(Result, getContext()))
6866 return nullptr;
6867
6868 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
6869 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
6870 // Determine the overloaded type of this builtin.
6871 llvm::Type *Ty;
6872 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
6873 Ty = FloatTy;
6874 else
6875 Ty = DoubleTy;
6876
6877 // Determine whether this is an unsigned conversion or not.
6878 bool usgn = Result.getZExtValue() == 1;
6879 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
6880
6881 // Call the appropriate intrinsic.
6882 Function *F = CGM.getIntrinsic(Int, Ty);
6883 return Builder.CreateCall(F, Ops, "vcvtr");
6884 }
6885
6886 // Determine the type of this overloaded NEON intrinsic.
6887 NeonTypeFlags Type(Result.getZExtValue());
6888 bool usgn = Type.isUnsigned();
6889 bool rightShift = false;
6890
6891 llvm::VectorType *VTy = GetNeonType(this, Type,
6892 getTarget().hasLegalHalfType());
6893 llvm::Type *Ty = VTy;
6894 if (!Ty)
6895 return nullptr;
6896
6897 // Many NEON builtins have identical semantics and uses in ARM and
6898 // AArch64. Emit these in a single function.
6899 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
6900 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
6901 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
6902 if (Builtin)
6903 return EmitCommonNeonBuiltinExpr(
6904 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
6905 Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
6906
6907 unsigned Int;
6908 switch (BuiltinID) {
6909 default: return nullptr;
6910 case NEON::BI__builtin_neon_vld1q_lane_v:
6911 // Handle 64-bit integer elements as a special case. Use shuffles of
6912 // one-element vectors to avoid poor code for i64 in the backend.
6913 if (VTy->getElementType()->isIntegerTy(64)) {
6914 // Extract the other lane.
6915 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6916 uint32_t Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
6917 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
6918 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
6919 // Load the value as a one-element vector.
6920 Ty = llvm::VectorType::get(VTy->getElementType(), 1);
6921 llvm::Type *Tys[] = {Ty, Int8PtrTy};
6922 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
6923 Value *Align = getAlignmentValue32(PtrOp0);
6924 Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
6925 // Combine them.
6926 uint32_t Indices[] = {1 - Lane, Lane};
6927 SV = llvm::ConstantDataVector::get(getLLVMContext(), Indices);
6928 return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
6929 }
6930 LLVM_FALLTHROUGH;
6931 case NEON::BI__builtin_neon_vld1_lane_v: {
6932 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6933 PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
6934 Value *Ld = Builder.CreateLoad(PtrOp0);
6935 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
6936 }
6937 case NEON::BI__builtin_neon_vqrshrn_n_v:
6938 Int =
6939 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
6940 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
6941 1, true);
6942 case NEON::BI__builtin_neon_vqrshrun_n_v:
6943 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
6944 Ops, "vqrshrun_n", 1, true);
6945 case NEON::BI__builtin_neon_vqshrn_n_v:
6946 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
6947 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
6948 1, true);
6949 case NEON::BI__builtin_neon_vqshrun_n_v:
6950 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
6951 Ops, "vqshrun_n", 1, true);
6952 case NEON::BI__builtin_neon_vrecpe_v:
6953 case NEON::BI__builtin_neon_vrecpeq_v:
6954 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
6955 Ops, "vrecpe");
6956 case NEON::BI__builtin_neon_vrshrn_n_v:
6957 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
6958 Ops, "vrshrn_n", 1, true);
6959 case NEON::BI__builtin_neon_vrsra_n_v:
6960 case NEON::BI__builtin_neon_vrsraq_n_v:
6961 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6962 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6963 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
6964 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
6965 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
6966 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
6967 case NEON::BI__builtin_neon_vsri_n_v:
6968 case NEON::BI__builtin_neon_vsriq_n_v:
6969 rightShift = true;
6970 LLVM_FALLTHROUGH;
6971 case NEON::BI__builtin_neon_vsli_n_v:
6972 case NEON::BI__builtin_neon_vsliq_n_v:
6973 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
6974 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
6975 Ops, "vsli_n");
6976 case NEON::BI__builtin_neon_vsra_n_v:
6977 case NEON::BI__builtin_neon_vsraq_n_v:
6978 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6979 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
6980 return Builder.CreateAdd(Ops[0], Ops[1]);
6981 case NEON::BI__builtin_neon_vst1q_lane_v:
6982 // Handle 64-bit integer elements as a special case. Use a shuffle to get
6983 // a one-element vector and avoid poor code for i64 in the backend.
6984 if (VTy->getElementType()->isIntegerTy(64)) {
6985 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6986 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
6987 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
6988 Ops[2] = getAlignmentValue32(PtrOp0);
6989 llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
6990 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
6991 Tys), Ops);
6992 }
6993 LLVM_FALLTHROUGH;
6994 case NEON::BI__builtin_neon_vst1_lane_v: {
6995 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6996 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
6997 Ty = llvm::PointerType::get(Ops[1]->getType(), DefaultAS);
6998 auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
6999 return St;
7000 }
7001 case NEON::BI__builtin_neon_vtbl1_v:
7002 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
7003 Ops, "vtbl1");
7004 case NEON::BI__builtin_neon_vtbl2_v:
7005 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
7006 Ops, "vtbl2");
7007 case NEON::BI__builtin_neon_vtbl3_v:
7008 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
7009 Ops, "vtbl3");
7010 case NEON::BI__builtin_neon_vtbl4_v:
7011 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
7012 Ops, "vtbl4");
7013 case NEON::BI__builtin_neon_vtbx1_v:
7014 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
7015 Ops, "vtbx1");
7016 case NEON::BI__builtin_neon_vtbx2_v:
7017 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
7018 Ops, "vtbx2");
7019 case NEON::BI__builtin_neon_vtbx3_v:
7020 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
7021 Ops, "vtbx3");
7022 case NEON::BI__builtin_neon_vtbx4_v:
7023 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
7024 Ops, "vtbx4");
7025 }
7026}
7027
7028static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
7029 const CallExpr *E,
7030 SmallVectorImpl<Value *> &Ops,
7031 llvm::Triple::ArchType Arch) {
  unsigned Int = 0;
7033 const char *s = nullptr;
7034
7035 switch (BuiltinID) {
7036 default:
7037 return nullptr;
7038 case NEON::BI__builtin_neon_vtbl1_v:
7039 case NEON::BI__builtin_neon_vqtbl1_v:
7040 case NEON::BI__builtin_neon_vqtbl1q_v:
7041 case NEON::BI__builtin_neon_vtbl2_v:
7042 case NEON::BI__builtin_neon_vqtbl2_v:
7043 case NEON::BI__builtin_neon_vqtbl2q_v:
7044 case NEON::BI__builtin_neon_vtbl3_v:
7045 case NEON::BI__builtin_neon_vqtbl3_v:
7046 case NEON::BI__builtin_neon_vqtbl3q_v:
7047 case NEON::BI__builtin_neon_vtbl4_v:
7048 case NEON::BI__builtin_neon_vqtbl4_v:
7049 case NEON::BI__builtin_neon_vqtbl4q_v:
7050 break;
7051 case NEON::BI__builtin_neon_vtbx1_v:
7052 case NEON::BI__builtin_neon_vqtbx1_v:
7053 case NEON::BI__builtin_neon_vqtbx1q_v:
7054 case NEON::BI__builtin_neon_vtbx2_v:
7055 case NEON::BI__builtin_neon_vqtbx2_v:
7056 case NEON::BI__builtin_neon_vqtbx2q_v:
7057 case NEON::BI__builtin_neon_vtbx3_v:
7058 case NEON::BI__builtin_neon_vqtbx3_v:
7059 case NEON::BI__builtin_neon_vqtbx3q_v:
7060 case NEON::BI__builtin_neon_vtbx4_v:
7061 case NEON::BI__builtin_neon_vqtbx4_v:
7062 case NEON::BI__builtin_neon_vqtbx4q_v:
7063 break;
7064 }
7065
7066 assert(E->getNumArgs() >= 3);
7067
7068 // Get the last argument, which specifies the vector type.
7069 llvm::APSInt Result;
7070 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
7071 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
7072 return nullptr;
7073
7074 // Determine the type of this overloaded NEON intrinsic.
7075 NeonTypeFlags Type(Result.getZExtValue());
7076 llvm::VectorType *Ty = GetNeonType(&CGF, Type);
7077 if (!Ty)
7078 return nullptr;
7079
7080 CodeGen::CGBuilderTy &Builder = CGF.Builder;
7081
  // AArch64 scalar builtins are not overloaded: they do not have an extra
  // argument that specifies the vector type, so we need to handle each case
  // individually.
7084 switch (BuiltinID) {
7085 case NEON::BI__builtin_neon_vtbl1_v: {
7086 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
7087 Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
7088 "vtbl1");
7089 }
7090 case NEON::BI__builtin_neon_vtbl2_v: {
7091 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
7092 Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
7093 "vtbl1");
7094 }
7095 case NEON::BI__builtin_neon_vtbl3_v: {
7096 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
7097 Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
7098 "vtbl2");
7099 }
7100 case NEON::BI__builtin_neon_vtbl4_v: {
7101 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
7102 Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
7103 "vtbl2");
7104 }
7105 case NEON::BI__builtin_neon_vtbx1_v: {
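    // AArch64 has no single-register vtbx instruction: do the lookup with
    // tbl1, then for each lane whose index was out of range (>= 8) select
    // the corresponding element of the original destination operand instead.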
7106 Value *TblRes =
7107 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
7108 Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
7109
7110 llvm::Constant *EightV = ConstantInt::get(Ty, 8);
7111 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
7112 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7113
7114 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7115 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7116 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7117 }
7118 case NEON::BI__builtin_neon_vtbx2_v: {
7119 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
7120 Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
7121 "vtbx1");
7122 }
7123 case NEON::BI__builtin_neon_vtbx3_v: {
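    // Same trick as vtbx1 above: look up in the three packed table registers
    // with tbl2, then keep the destination element wherever the index was out
    // of range for a 24-element table (>= 24).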
7124 Value *TblRes =
7125 packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
7126 Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
7127
7128 llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
7129 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
7130 TwentyFourV);
7131 CmpRes = Builder.CreateSExt(CmpRes, Ty);
7132
7133 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
7134 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
7135 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
7136 }
7137 case NEON::BI__builtin_neon_vtbx4_v: {
7138 return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
7139 Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
7140 "vtbx2");
7141 }
7142 case NEON::BI__builtin_neon_vqtbl1_v:
7143 case NEON::BI__builtin_neon_vqtbl1q_v:
7144 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
7145 case NEON::BI__builtin_neon_vqtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2q_v:
7147 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
7148 case NEON::BI__builtin_neon_vqtbl3_v:
7149 case NEON::BI__builtin_neon_vqtbl3q_v:
7150 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
7151 case NEON::BI__builtin_neon_vqtbl4_v:
7152 case NEON::BI__builtin_neon_vqtbl4q_v:
7153 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
7154 case NEON::BI__builtin_neon_vqtbx1_v:
7155 case NEON::BI__builtin_neon_vqtbx1q_v:
7156 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
7157 case NEON::BI__builtin_neon_vqtbx2_v:
7158 case NEON::BI__builtin_neon_vqtbx2q_v:
7159 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
7160 case NEON::BI__builtin_neon_vqtbx3_v:
7161 case NEON::BI__builtin_neon_vqtbx3q_v:
7162 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
7163 case NEON::BI__builtin_neon_vqtbx4_v:
7164 case NEON::BI__builtin_neon_vqtbx4q_v:
7165 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
7166 }
7167 }
7168
7169 if (!Int)
7170 return nullptr;
7171
7172 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
7173 return CGF.EmitNeonCall(F, Ops, s);
7174}
7175
7176Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
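  // Bitcast the scalar to i16 and insert it into lane 0 of an otherwise
  // undef <4 x i16> vector.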
7177 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
7178 Op = Builder.CreateBitCast(Op, Int16Ty);
7179 Value *V = UndefValue::get(VTy);
7180 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
7181 Op = Builder.CreateInsertElement(V, Op, CI);
7182 return Op;
7183}
7184
7185Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
7186 const CallExpr *E,
7187 llvm::Triple::ArchType Arch) {
7188 unsigned HintID = static_cast<unsigned>(-1);
7189 unsigned DefaultAS = CGM.getTargetCodeGenInfo().getDefaultAS();
7190 switch (BuiltinID) {
7191 default: break;
7192 case AArch64::BI__builtin_arm_nop:
7193 HintID = 0;
7194 break;
7195 case AArch64::BI__builtin_arm_yield:
7196 case AArch64::BI__yield:
7197 HintID = 1;
7198 break;
7199 case AArch64::BI__builtin_arm_wfe:
7200 case AArch64::BI__wfe:
7201 HintID = 2;
7202 break;
7203 case AArch64::BI__builtin_arm_wfi:
7204 case AArch64::BI__wfi:
7205 HintID = 3;
7206 break;
7207 case AArch64::BI__builtin_arm_sev:
7208 case AArch64::BI__sev:
7209 HintID = 4;
7210 break;
7211 case AArch64::BI__builtin_arm_sevl:
7212 case AArch64::BI__sevl:
7213 HintID = 5;
7214 break;
7215 }
7216
7217 if (HintID != static_cast<unsigned>(-1)) {
7218 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
7219 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
7220 }
7221
7222 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
7223 Value *Address = EmitScalarExpr(E->getArg(0));
7224 Value *RW = EmitScalarExpr(E->getArg(1));
7225 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
7226 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
7227 Value *IsData = EmitScalarExpr(E->getArg(4));
7228
7229 Value *Locality = nullptr;
7230 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
      // Temporal fetch: convert the cache level to an LLVM locality value.
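      // llvm.prefetch locality runs from 0 (no locality) to 3 (keep in all
      // cache levels); the builtin's cache level runs the other way, so
      // locality = 3 - CacheLevel.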
7232 Locality = llvm::ConstantInt::get(Int32Ty,
7233 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
7234 } else {
7235 // Streaming fetch.
7236 Locality = llvm::ConstantInt::get(Int32Ty, 0);
7237 }
7238
7239 // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
7240 // PLDL3STRM or PLDL2STRM.
7241 Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
7242 return Builder.CreateCall(F, {Address, RW, Locality, IsData});
7243 }
7244
7245 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
7246 assert((getContext().getTypeSize(E->getType()) == 32) &&
7247 "rbit of unusual size!");
7248 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7249 return Builder.CreateCall(
7250 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7251 }
7252 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
7253 assert((getContext().getTypeSize(E->getType()) == 64) &&
7254 "rbit of unusual size!");
7255 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7256 return Builder.CreateCall(
7257 CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7258 }
7259
7260 if (BuiltinID == AArch64::BI__clear_cache) {
7261 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7262 const FunctionDecl *FD = E->getDirectCallee();
7263 Value *Ops[2];
7264 for (unsigned i = 0; i < 2; i++)
7265 Ops[i] = EmitScalarExpr(E->getArg(i));
7266 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7267 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7268 StringRef Name = FD->getName();
7269 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7270 }
7271
7272 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
7273 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
7274 getContext().getTypeSize(E->getType()) == 128) {
7275 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
7276 ? Intrinsic::aarch64_ldaxp
7277 : Intrinsic::aarch64_ldxp);
7278
7279 Value *LdPtr = EmitScalarExpr(E->getArg(0));
7280 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7281 "ldxp");
7282
7283 Value *Val0 = Builder.CreateExtractValue(Val, 1);
7284 Value *Val1 = Builder.CreateExtractValue(Val, 0);
7285 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
7286 Val0 = Builder.CreateZExt(Val0, Int128Ty);
7287 Val1 = Builder.CreateZExt(Val1, Int128Ty);
7288
7289 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
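    // Val0 (extracted at index 1) forms the high half:
    //   result = (Val0 << 64) | Val1.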
7290 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7291 Val = Builder.CreateOr(Val, Val1);
7292 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7293 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
7294 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
7295 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7296
7297 QualType Ty = E->getType();
7298 llvm::Type *RealResTy = ConvertType(Ty);
7299 llvm::Type *PtrTy = llvm::IntegerType::get(
7300 getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo(DefaultAS);
7301 LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7302
7303 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
7304 ? Intrinsic::aarch64_ldaxr
7305 : Intrinsic::aarch64_ldxr,
7306 PtrTy);
7307 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
7308
7309 if (RealResTy->isPointerTy())
7310 return Builder.CreateIntToPtr(Val, RealResTy);
7311
7312 llvm::Type *IntResTy = llvm::IntegerType::get(
7313 getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7314 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7315 return Builder.CreateBitCast(Val, RealResTy);
7316 }
7317
7318 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
7319 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
7320 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
7321 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
7322 ? Intrinsic::aarch64_stlxp
7323 : Intrinsic::aarch64_stxp);
7324 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
7325
7326 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7327 EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
7328
7329 Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::get(STy, DefaultAS));
7330 llvm::Value *Val = Builder.CreateLoad(Tmp);
7331
7332 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7333 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7334 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
7335 Int8PtrTy);
7336 return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
7337 }
7338
7339 if (BuiltinID == AArch64::BI__builtin_arm_strex ||
7340 BuiltinID == AArch64::BI__builtin_arm_stlex) {
7341 Value *StoreVal = EmitScalarExpr(E->getArg(0));
7342 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7343
7344 QualType Ty = E->getArg(0)->getType();
7345 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7346 getContext().getTypeSize(Ty));
    StoreAddr = Builder.CreateBitCast(StoreAddr,
                                      StoreTy->getPointerTo(DefaultAS));
7348
7349 if (StoreVal->getType()->isPointerTy())
7350 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
7351 else {
7352 llvm::Type *IntTy = llvm::IntegerType::get(
7353 getLLVMContext(),
7354 CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7355 StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7356 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
7357 }
7358
7359 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
7360 ? Intrinsic::aarch64_stlxr
7361 : Intrinsic::aarch64_stxr,
7362 StoreAddr->getType());
7363 return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
7364 }
7365
7366 if (BuiltinID == AArch64::BI__getReg) {
7367 Expr::EvalResult Result;
7368 if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7369 llvm_unreachable("Sema will ensure that the parameter is constant");
7370
7371 llvm::APSInt Value = Result.Val.getInt();
7372 LLVMContext &Context = CGM.getLLVMContext();
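    // Register number 31 denotes the stack pointer here; 0-30 map to x0-x30.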
7373 std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
7374
7375 llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
7376 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7377 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7378
7379 llvm::Function *F =
7380 CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
7381 return Builder.CreateCall(F, Metadata);
7382 }
7383
7384 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
7385 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
7386 return Builder.CreateCall(F);
7387 }
7388
7389 if (BuiltinID == AArch64::BI_ReadWriteBarrier)
7390 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
7391 llvm::SyncScope::SingleThread);
7392
7393 // CRC32
7394 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7395 switch (BuiltinID) {
7396 case AArch64::BI__builtin_arm_crc32b:
7397 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
7398 case AArch64::BI__builtin_arm_crc32cb:
7399 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
7400 case AArch64::BI__builtin_arm_crc32h:
7401 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
7402 case AArch64::BI__builtin_arm_crc32ch:
7403 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
7404 case AArch64::BI__builtin_arm_crc32w:
7405 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
7406 case AArch64::BI__builtin_arm_crc32cw:
7407 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
7408 case AArch64::BI__builtin_arm_crc32d:
7409 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
7410 case AArch64::BI__builtin_arm_crc32cd:
7411 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
7412 }
7413
7414 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7415 Value *Arg0 = EmitScalarExpr(E->getArg(0));
7416 Value *Arg1 = EmitScalarExpr(E->getArg(1));
7417 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7418
7419 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
7420 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
7421
7422 return Builder.CreateCall(F, {Arg0, Arg1});
7423 }
7424
7425 // Memory Tagging Extensions (MTE) Intrinsics
7426 Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
7427 switch (BuiltinID) {
7428 case AArch64::BI__builtin_arm_irg:
7429 MTEIntrinsicID = Intrinsic::aarch64_irg; break;
7430 case AArch64::BI__builtin_arm_addg:
7431 MTEIntrinsicID = Intrinsic::aarch64_addg; break;
7432 case AArch64::BI__builtin_arm_gmi:
7433 MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
7434 case AArch64::BI__builtin_arm_ldg:
7435 MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
7436 case AArch64::BI__builtin_arm_stg:
7437 MTEIntrinsicID = Intrinsic::aarch64_stg; break;
7438 case AArch64::BI__builtin_arm_subp:
7439 MTEIntrinsicID = Intrinsic::aarch64_subp; break;
7440 }
7441
7442 if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
7443 llvm::Type *T = ConvertType(E->getType());
7444
7445 if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
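      // irg: insert a random allocation tag into the pointer; tag values
      // covered by Mask are excluded.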
7446 Value *Pointer = EmitScalarExpr(E->getArg(0));
7447 Value *Mask = EmitScalarExpr(E->getArg(1));
7448
7449 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7450 Mask = Builder.CreateZExt(Mask, Int64Ty);
7451 Value *RV = Builder.CreateCall(
7452 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, Mask});
7453 return Builder.CreatePointerCast(RV, T);
7454 }
7455 if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
7456 Value *Pointer = EmitScalarExpr(E->getArg(0));
7457 Value *TagOffset = EmitScalarExpr(E->getArg(1));
7458
7459 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7460 TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
7461 Value *RV = Builder.CreateCall(
7462 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
7463 return Builder.CreatePointerCast(RV, T);
7464 }
7465 if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
7466 Value *Pointer = EmitScalarExpr(E->getArg(0));
7467 Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
7468
7469 ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
7470 Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
7471 return Builder.CreateCall(
7472 CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
7473 }
    // Although it is possible to supply a different return
    // address (first arg) to this intrinsic, for now we set
    // the return address to be the same as the input address.
7477 if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
7478 Value *TagAddress = EmitScalarExpr(E->getArg(0));
7479 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
7480 Value *RV = Builder.CreateCall(
7481 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
7482 return Builder.CreatePointerCast(RV, T);
7483 }
    // Although it is possible to supply a different tag (to set)
    // to this intrinsic (as the first arg), for now we supply
    // the tag that is in the input address argument (the common use case).
7487 if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
7488 Value *TagAddress = EmitScalarExpr(E->getArg(0));
7489 TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
7490 return Builder.CreateCall(
7491 CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
7492 }
7493 if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
7494 Value *PointerA = EmitScalarExpr(E->getArg(0));
7495 Value *PointerB = EmitScalarExpr(E->getArg(1));
7496 PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
7497 PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
7498 return Builder.CreateCall(
7499 CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
7500 }
7501 }
7502
7503 if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
7504 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
7505 BuiltinID == AArch64::BI__builtin_arm_rsrp ||
7506 BuiltinID == AArch64::BI__builtin_arm_wsr ||
7507 BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
7508 BuiltinID == AArch64::BI__builtin_arm_wsrp) {
7509
7510 bool IsRead = BuiltinID == AArch64::BI__builtin_arm_rsr ||
7511 BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
7512 BuiltinID == AArch64::BI__builtin_arm_rsrp;
7513
7514 bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
7515 BuiltinID == AArch64::BI__builtin_arm_wsrp;
7516
7517 bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
7518 BuiltinID != AArch64::BI__builtin_arm_wsr;
7519
7520 llvm::Type *ValueType;
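    // The underlying system register is always accessed through a 64-bit
    // GPR, even when the value itself is 32 bits or a pointer.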
7521 llvm::Type *RegisterType = Int64Ty;
7522 if (IsPointerBuiltin) {
7523 ValueType = VoidPtrTy;
7524 } else if (Is64Bit) {
7525 ValueType = Int64Ty;
7526 } else {
7527 ValueType = Int32Ty;
7528 }
7529
    return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
                                      IsRead);
7531 }
7532
7533 if (BuiltinID == AArch64::BI_ReadStatusReg ||
7534 BuiltinID == AArch64::BI_WriteStatusReg) {
7535 LLVMContext &Context = CGM.getLLVMContext();
7536
7537 unsigned SysReg =
7538 E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
7539
7540 std::string SysRegStr;
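    // The immediate packs the MSR/MRS operand fields as
    // op0[1]:op1[3]:CRn[4]:CRm[4]:op2[3]. read_register/write_register expect
    // the textual "op0:op1:CRn:CRm:op2" form; op0 is always 2 or 3, hence the
    // leading (1 << 1) | ... term below.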
7541 llvm::raw_string_ostream(SysRegStr) <<
7542 ((1 << 1) | ((SysReg >> 14) & 1)) << ":" <<
7543 ((SysReg >> 11) & 7) << ":" <<
7544 ((SysReg >> 7) & 15) << ":" <<
7545 ((SysReg >> 3) & 15) << ":" <<
7546 ( SysReg & 7);
7547
7548 llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
7549 llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7550 llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7551
7552 llvm::Type *RegisterType = Int64Ty;
7553 llvm::Type *Types[] = { RegisterType };
7554
7555 if (BuiltinID == AArch64::BI_ReadStatusReg) {
7556 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
7557
7558 return Builder.CreateCall(F, Metadata);
7559 }
7560
7561 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7562 llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
7563
7564 return Builder.CreateCall(F, { Metadata, ArgValue });
7565 }
7566
7567 if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
7568 llvm::Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress,
7569 {CGM.ProgramInt8PtrTy});
7570 return Builder.CreateCall(F);
7571 }
7572
7573 if (BuiltinID == AArch64::BI__builtin_sponentry) {
7574 llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry,
7575 {CGM.ProgramInt8PtrTy});
7576 return Builder.CreateCall(F);
7577 }
7578
7579 // Find out if any arguments are required to be integer constant
7580 // expressions.
7581 unsigned ICEArguments = 0;
7582 ASTContext::GetBuiltinTypeError Error;
7583 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7584 assert(Error == ASTContext::GE_None && "Should not codegen an error");
7585
7586 llvm::SmallVector<Value*, 4> Ops;
7587 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
7588 if ((ICEArguments & (1 << i)) == 0) {
7589 Ops.push_back(EmitScalarExpr(E->getArg(i)));
7590 } else {
7591 // If this is required to be a constant, constant fold it so that we know
7592 // that the generated intrinsic gets a ConstantInt.
7593 llvm::APSInt Result;
7594 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
7595 assert(IsConst && "Constant arg isn't actually constant?");
7596 (void)IsConst;
7597 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
7598 }
7599 }
7600
7601 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
7602 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
7603 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
7604
7605 if (Builtin) {
7606 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
7607 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
7608 assert(Result && "SISD intrinsic should have been handled");
7609 return Result;
7610 }
7611
7612 llvm::APSInt Result;
7613 const Expr *Arg = E->getArg(E->getNumArgs()-1);
7614 NeonTypeFlags Type(0);
7615 if (Arg->isIntegerConstantExpr(Result, getContext()))
7616 // Determine the type of this overloaded NEON intrinsic.
7617 Type = NeonTypeFlags(Result.getZExtValue());
7618
7619 bool usgn = Type.isUnsigned();
7620 bool quad = Type.isQuad();
7621
7622 // Handle non-overloaded intrinsics first.
7623 switch (BuiltinID) {
7624 default: break;
7625 case NEON::BI__builtin_neon_vabsh_f16:
7626 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7627 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
7628 case NEON::BI__builtin_neon_vldrq_p128: {
7629 llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
7630 llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
7631 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
7632 return Builder.CreateAlignedLoad(Int128Ty, Ptr,
7633 CharUnits::fromQuantity(16));
7634 }
7635 case NEON::BI__builtin_neon_vstrq_p128: {
7636 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128,
7637 DefaultAS);
7638 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
7639 return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
7640 }
7641 case NEON::BI__builtin_neon_vcvts_u32_f32:
7642 case NEON::BI__builtin_neon_vcvtd_u64_f64:
7643 usgn = true;
7644 LLVM_FALLTHROUGH;
7645 case NEON::BI__builtin_neon_vcvts_s32_f32:
7646 case NEON::BI__builtin_neon_vcvtd_s64_f64: {
7647 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7648 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
7649 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
7650 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
7651 Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
7652 if (usgn)
7653 return Builder.CreateFPToUI(Ops[0], InTy);
7654 return Builder.CreateFPToSI(Ops[0], InTy);
7655 }
7656 case NEON::BI__builtin_neon_vcvts_f32_u32:
7657 case NEON::BI__builtin_neon_vcvtd_f64_u64:
7658 usgn = true;
7659 LLVM_FALLTHROUGH;
7660 case NEON::BI__builtin_neon_vcvts_f32_s32:
7661 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
7662 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7663 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
7664 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
7665 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
7666 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
7667 if (usgn)
7668 return Builder.CreateUIToFP(Ops[0], FTy);
7669 return Builder.CreateSIToFP(Ops[0], FTy);
7670 }
7671 case NEON::BI__builtin_neon_vcvth_f16_u16:
7672 case NEON::BI__builtin_neon_vcvth_f16_u32:
7673 case NEON::BI__builtin_neon_vcvth_f16_u64:
7674 usgn = true;
7675 LLVM_FALLTHROUGH;
7676 case NEON::BI__builtin_neon_vcvth_f16_s16:
7677 case NEON::BI__builtin_neon_vcvth_f16_s32:
7678 case NEON::BI__builtin_neon_vcvth_f16_s64: {
7679 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7680 llvm::Type *FTy = HalfTy;
7681 llvm::Type *InTy;
7682 if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
7683 InTy = Int64Ty;
7684 else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
7685 InTy = Int32Ty;
7686 else
7687 InTy = Int16Ty;
7688 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
7689 if (usgn)
7690 return Builder.CreateUIToFP(Ops[0], FTy);
7691 return Builder.CreateSIToFP(Ops[0], FTy);
7692 }
7693 case NEON::BI__builtin_neon_vcvth_u16_f16:
7694 usgn = true;
7695 LLVM_FALLTHROUGH;
7696 case NEON::BI__builtin_neon_vcvth_s16_f16: {
7697 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7698 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7699 if (usgn)
7700 return Builder.CreateFPToUI(Ops[0], Int16Ty);
7701 return Builder.CreateFPToSI(Ops[0], Int16Ty);
7702 }
7703 case NEON::BI__builtin_neon_vcvth_u32_f16:
7704 usgn = true;
7705 LLVM_FALLTHROUGH;
7706 case NEON::BI__builtin_neon_vcvth_s32_f16: {
7707 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7708 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7709 if (usgn)
7710 return Builder.CreateFPToUI(Ops[0], Int32Ty);
7711 return Builder.CreateFPToSI(Ops[0], Int32Ty);
7712 }
7713 case NEON::BI__builtin_neon_vcvth_u64_f16:
7714 usgn = true;
7715 LLVM_FALLTHROUGH;
7716 case NEON::BI__builtin_neon_vcvth_s64_f16: {
7717 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7718 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7719 if (usgn)
7720 return Builder.CreateFPToUI(Ops[0], Int64Ty);
7721 return Builder.CreateFPToSI(Ops[0], Int64Ty);
7722 }
7723 case NEON::BI__builtin_neon_vcvtah_u16_f16:
7724 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
7725 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
7726 case NEON::BI__builtin_neon_vcvtph_u16_f16:
7727 case NEON::BI__builtin_neon_vcvtah_s16_f16:
7728 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
7729 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
7730 case NEON::BI__builtin_neon_vcvtph_s16_f16: {
7731 unsigned Int;
7732 llvm::Type* InTy = Int32Ty;
7733 llvm::Type* FTy = HalfTy;
7734 llvm::Type *Tys[2] = {InTy, FTy};
7735 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7736 switch (BuiltinID) {
7737 default: llvm_unreachable("missing builtin ID in switch!");
7738 case NEON::BI__builtin_neon_vcvtah_u16_f16:
7739 Int = Intrinsic::aarch64_neon_fcvtau; break;
7740 case NEON::BI__builtin_neon_vcvtmh_u16_f16:
7741 Int = Intrinsic::aarch64_neon_fcvtmu; break;
7742 case NEON::BI__builtin_neon_vcvtnh_u16_f16:
7743 Int = Intrinsic::aarch64_neon_fcvtnu; break;
7744 case NEON::BI__builtin_neon_vcvtph_u16_f16:
7745 Int = Intrinsic::aarch64_neon_fcvtpu; break;
7746 case NEON::BI__builtin_neon_vcvtah_s16_f16:
7747 Int = Intrinsic::aarch64_neon_fcvtas; break;
7748 case NEON::BI__builtin_neon_vcvtmh_s16_f16:
7749 Int = Intrinsic::aarch64_neon_fcvtms; break;
7750 case NEON::BI__builtin_neon_vcvtnh_s16_f16:
7751 Int = Intrinsic::aarch64_neon_fcvtns; break;
7752 case NEON::BI__builtin_neon_vcvtph_s16_f16:
7753 Int = Intrinsic::aarch64_neon_fcvtps; break;
7754 }
7755 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
7756 return Builder.CreateTrunc(Ops[0], Int16Ty);
7757 }
7758 case NEON::BI__builtin_neon_vcaleh_f16:
7759 case NEON::BI__builtin_neon_vcalth_f16:
7760 case NEON::BI__builtin_neon_vcageh_f16:
7761 case NEON::BI__builtin_neon_vcagth_f16: {
7762 unsigned Int;
7763 llvm::Type* InTy = Int32Ty;
7764 llvm::Type* FTy = HalfTy;
7765 llvm::Type *Tys[2] = {InTy, FTy};
7766 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7767 switch (BuiltinID) {
7768 default: llvm_unreachable("missing builtin ID in switch!");
7769 case NEON::BI__builtin_neon_vcageh_f16:
7770 Int = Intrinsic::aarch64_neon_facge; break;
7771 case NEON::BI__builtin_neon_vcagth_f16:
7772 Int = Intrinsic::aarch64_neon_facgt; break;
7773 case NEON::BI__builtin_neon_vcaleh_f16:
7774 Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
7775 case NEON::BI__builtin_neon_vcalth_f16:
7776 Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
7777 }
7778 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
7779 return Builder.CreateTrunc(Ops[0], Int16Ty);
7780 }
7781 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
7782 case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
7783 unsigned Int;
7784 llvm::Type* InTy = Int32Ty;
7785 llvm::Type* FTy = HalfTy;
7786 llvm::Type *Tys[2] = {InTy, FTy};
7787 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7788 switch (BuiltinID) {
7789 default: llvm_unreachable("missing builtin ID in switch!");
7790 case NEON::BI__builtin_neon_vcvth_n_s16_f16:
7791 Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
7792 case NEON::BI__builtin_neon_vcvth_n_u16_f16:
7793 Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
7794 }
7795 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
7796 return Builder.CreateTrunc(Ops[0], Int16Ty);
7797 }
7798 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
7799 case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
7800 unsigned Int;
7801 llvm::Type* FTy = HalfTy;
7802 llvm::Type* InTy = Int32Ty;
7803 llvm::Type *Tys[2] = {FTy, InTy};
7804 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7805 switch (BuiltinID) {
7806 default: llvm_unreachable("missing builtin ID in switch!");
7807 case NEON::BI__builtin_neon_vcvth_n_f16_s16:
7808 Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
7809 Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
7810 break;
7811 case NEON::BI__builtin_neon_vcvth_n_f16_u16:
7812 Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
7813 Ops[0] = Builder.CreateZExt(Ops[0], InTy);
7814 break;
7815 }
7816 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
7817 }
7818 case NEON::BI__builtin_neon_vpaddd_s64: {
7819 llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
7820 Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2i64, so make sure it's bitcast to that.
7822 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
7823 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7824 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7825 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7826 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2i64 into a scalar i64.
7828 return Builder.CreateAdd(Op0, Op1, "vpaddd");
7829 }
7830 case NEON::BI__builtin_neon_vpaddd_f64: {
    llvm::Type *Ty = llvm::VectorType::get(DoubleTy, 2);
7833 Value *Vec = EmitScalarExpr(E->getArg(0));
7834 // The vector is v2f64, so make sure it's bitcast to that.
7835 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
7836 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7837 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7838 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7839 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7840 // Pairwise addition of a v2f64 into a scalar f64.
7841 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
7842 }
7843 case NEON::BI__builtin_neon_vpadds_f32: {
    llvm::Type *Ty = llvm::VectorType::get(FloatTy, 2);
7846 Value *Vec = EmitScalarExpr(E->getArg(0));
7847 // The vector is v2f32, so make sure it's bitcast to that.
7848 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
7849 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
7850 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
7851 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
7852 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
7853 // Pairwise addition of a v2f32 into a scalar f32.
7854 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
7855 }
7856 case NEON::BI__builtin_neon_vceqzd_s64:
7857 case NEON::BI__builtin_neon_vceqzd_f64:
7858 case NEON::BI__builtin_neon_vceqzs_f32:
7859 case NEON::BI__builtin_neon_vceqzh_f16:
7860 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7861 return EmitAArch64CompareBuiltinExpr(
7862 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7863 ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
7864 case NEON::BI__builtin_neon_vcgezd_s64:
7865 case NEON::BI__builtin_neon_vcgezd_f64:
7866 case NEON::BI__builtin_neon_vcgezs_f32:
7867 case NEON::BI__builtin_neon_vcgezh_f16:
7868 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7869 return EmitAArch64CompareBuiltinExpr(
7870 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7871 ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
7872 case NEON::BI__builtin_neon_vclezd_s64:
7873 case NEON::BI__builtin_neon_vclezd_f64:
7874 case NEON::BI__builtin_neon_vclezs_f32:
7875 case NEON::BI__builtin_neon_vclezh_f16:
7876 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7877 return EmitAArch64CompareBuiltinExpr(
7878 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7879 ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
7880 case NEON::BI__builtin_neon_vcgtzd_s64:
7881 case NEON::BI__builtin_neon_vcgtzd_f64:
7882 case NEON::BI__builtin_neon_vcgtzs_f32:
7883 case NEON::BI__builtin_neon_vcgtzh_f16:
7884 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7885 return EmitAArch64CompareBuiltinExpr(
7886 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7887 ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
7888 case NEON::BI__builtin_neon_vcltzd_s64:
7889 case NEON::BI__builtin_neon_vcltzd_f64:
7890 case NEON::BI__builtin_neon_vcltzs_f32:
7891 case NEON::BI__builtin_neon_vcltzh_f16:
7892 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7893 return EmitAArch64CompareBuiltinExpr(
7894 Ops[0], ConvertType(E->getCallReturnType(getContext())),
7895 ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
7896
7897 case NEON::BI__builtin_neon_vceqzd_u64: {
7898 Ops.push_back(EmitScalarExpr(E->getArg(0)));
7899 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
7900 Ops[0] =
7901 Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
7902 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
7903 }
7904 case NEON::BI__builtin_neon_vceqd_f64:
7905 case NEON::BI__builtin_neon_vcled_f64:
7906 case NEON::BI__builtin_neon_vcltd_f64:
7907 case NEON::BI__builtin_neon_vcged_f64:
7908 case NEON::BI__builtin_neon_vcgtd_f64: {
7909 llvm::CmpInst::Predicate P;
7910 switch (BuiltinID) {
7911 default: llvm_unreachable("missing builtin ID in switch!");
7912 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
7913 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
7914 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
7915 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
7916 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
7917 }
7918 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7919 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
7920 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
7921 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
7922 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
7923 }
7924 case NEON::BI__builtin_neon_vceqs_f32:
7925 case NEON::BI__builtin_neon_vcles_f32:
7926 case NEON::BI__builtin_neon_vclts_f32:
7927 case NEON::BI__builtin_neon_vcges_f32:
7928 case NEON::BI__builtin_neon_vcgts_f32: {
7929 llvm::CmpInst::Predicate P;
7930 switch (BuiltinID) {
7931 default: llvm_unreachable("missing builtin ID in switch!");
7932 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
7933 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
7934 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
7935 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
7936 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
7937 }
7938 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7939 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
7940 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
7941 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int32Ty, "vcmps");
7943 }
7944 case NEON::BI__builtin_neon_vceqh_f16:
7945 case NEON::BI__builtin_neon_vcleh_f16:
7946 case NEON::BI__builtin_neon_vclth_f16:
7947 case NEON::BI__builtin_neon_vcgeh_f16:
7948 case NEON::BI__builtin_neon_vcgth_f16: {
7949 llvm::CmpInst::Predicate P;
7950 switch (BuiltinID) {
7951 default: llvm_unreachable("missing builtin ID in switch!");
7952 case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
7953 case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
7954 case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
7955 case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
7956 case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
7957 }
7958 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7959 Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
7960 Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
7961 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int16Ty, "vcmph");
7963 }
7964 case NEON::BI__builtin_neon_vceqd_s64:
7965 case NEON::BI__builtin_neon_vceqd_u64:
7966 case NEON::BI__builtin_neon_vcgtd_s64:
7967 case NEON::BI__builtin_neon_vcgtd_u64:
7968 case NEON::BI__builtin_neon_vcltd_s64:
7969 case NEON::BI__builtin_neon_vcltd_u64:
7970 case NEON::BI__builtin_neon_vcged_u64:
7971 case NEON::BI__builtin_neon_vcged_s64:
7972 case NEON::BI__builtin_neon_vcled_u64:
7973 case NEON::BI__builtin_neon_vcled_s64: {
7974 llvm::CmpInst::Predicate P;
7975 switch (BuiltinID) {
7976 default: llvm_unreachable("missing builtin ID in switch!");
7977 case NEON::BI__builtin_neon_vceqd_s64:
    case NEON::BI__builtin_neon_vceqd_u64: P = llvm::ICmpInst::ICMP_EQ; break;
    case NEON::BI__builtin_neon_vcgtd_s64: P = llvm::ICmpInst::ICMP_SGT; break;
    case NEON::BI__builtin_neon_vcgtd_u64: P = llvm::ICmpInst::ICMP_UGT; break;
    case NEON::BI__builtin_neon_vcltd_s64: P = llvm::ICmpInst::ICMP_SLT; break;
    case NEON::BI__builtin_neon_vcltd_u64: P = llvm::ICmpInst::ICMP_ULT; break;
    case NEON::BI__builtin_neon_vcged_u64: P = llvm::ICmpInst::ICMP_UGE; break;
    case NEON::BI__builtin_neon_vcged_s64: P = llvm::ICmpInst::ICMP_SGE; break;
    case NEON::BI__builtin_neon_vcled_u64: P = llvm::ICmpInst::ICMP_ULE; break;
    case NEON::BI__builtin_neon_vcled_s64: P = llvm::ICmpInst::ICMP_SLE; break;
7987 }
7988 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7989 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
7990 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
7991 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
    return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
7993 }
7994 case NEON::BI__builtin_neon_vtstd_s64:
7995 case NEON::BI__builtin_neon_vtstd_u64: {
7996 Ops.push_back(EmitScalarExpr(E->getArg(1)));
7997 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
7998 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
7999 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
8000 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
8001 llvm::Constant::getNullValue(Int64Ty));
8002 return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
8003 }
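  // vset_lane: insert the scalar (arg 0) into the requested lane of the
  // vector (arg 1).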
8004 case NEON::BI__builtin_neon_vset_lane_i8:
8005 case NEON::BI__builtin_neon_vset_lane_i16:
8006 case NEON::BI__builtin_neon_vset_lane_i32:
8007 case NEON::BI__builtin_neon_vset_lane_i64:
8008 case NEON::BI__builtin_neon_vset_lane_f32:
8009 case NEON::BI__builtin_neon_vsetq_lane_i8:
8010 case NEON::BI__builtin_neon_vsetq_lane_i16:
8011 case NEON::BI__builtin_neon_vsetq_lane_i32:
8012 case NEON::BI__builtin_neon_vsetq_lane_i64:
8013 case NEON::BI__builtin_neon_vsetq_lane_f32:
8014 Ops.push_back(EmitScalarExpr(E->getArg(2)));
8015 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8016 case NEON::BI__builtin_neon_vset_lane_f64:
8017 // The vector type needs a cast for the v1f64 variant.
8018 Ops[1] = Builder.CreateBitCast(Ops[1],
8019 llvm::VectorType::get(DoubleTy, 1));
8020 Ops.push_back(EmitScalarExpr(E->getArg(2)));
8021 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8022 case NEON::BI__builtin_neon_vsetq_lane_f64:
8023 // The vector type needs a cast for the v2f64 variant.
8024 Ops[1] = Builder.CreateBitCast(Ops[1],
8025 llvm::VectorType::get(DoubleTy, 2));
8026 Ops.push_back(EmitScalarExpr(E->getArg(2)));
8027 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
8028
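  // The lane reads below bitcast the operand to the concrete vector type and
  // then extract the requested element.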
8029 case NEON::BI__builtin_neon_vget_lane_i8:
8030 case NEON::BI__builtin_neon_vdupb_lane_i8:
8031 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 8));
8032 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8033 "vget_lane");
8034 case NEON::BI__builtin_neon_vgetq_lane_i8:
8035 case NEON::BI__builtin_neon_vdupb_laneq_i8:
8036 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int8Ty, 16));
8037 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8038 "vgetq_lane");
8039 case NEON::BI__builtin_neon_vget_lane_i16:
8040 case NEON::BI__builtin_neon_vduph_lane_i16:
8041 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 4));
8042 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8043 "vget_lane");
8044 case NEON::BI__builtin_neon_vgetq_lane_i16:
8045 case NEON::BI__builtin_neon_vduph_laneq_i16:
8046 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int16Ty, 8));
8047 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8048 "vgetq_lane");
8049 case NEON::BI__builtin_neon_vget_lane_i32:
8050 case NEON::BI__builtin_neon_vdups_lane_i32:
8051 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 2));
8052 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8053 "vget_lane");
8054 case NEON::BI__builtin_neon_vdups_lane_f32:
8055 Ops[0] = Builder.CreateBitCast(Ops[0],
8056 llvm::VectorType::get(FloatTy, 2));
8057 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8058 "vdups_lane");
8059 case NEON::BI__builtin_neon_vgetq_lane_i32:
8060 case NEON::BI__builtin_neon_vdups_laneq_i32:
8061 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
8062 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8063 "vgetq_lane");
8064 case NEON::BI__builtin_neon_vget_lane_i64:
8065 case NEON::BI__builtin_neon_vdupd_lane_i64:
8066 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 1));
8067 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8068 "vget_lane");
8069 case NEON::BI__builtin_neon_vdupd_lane_f64:
8070 Ops[0] = Builder.CreateBitCast(Ops[0],
8071 llvm::VectorType::get(DoubleTy, 1));
8072 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8073 "vdupd_lane");
8074 case NEON::BI__builtin_neon_vgetq_lane_i64:
8075 case NEON::BI__builtin_neon_vdupd_laneq_i64:
8076 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
8077 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8078 "vgetq_lane");
8079 case NEON::BI__builtin_neon_vget_lane_f32:
8080 Ops[0] = Builder.CreateBitCast(Ops[0],
8081 llvm::VectorType::get(FloatTy, 2));
8082 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8083 "vget_lane");
8084 case NEON::BI__builtin_neon_vget_lane_f64:
8085 Ops[0] = Builder.CreateBitCast(Ops[0],
8086 llvm::VectorType::get(DoubleTy, 1));
8087 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8088 "vget_lane");
8089 case NEON::BI__builtin_neon_vgetq_lane_f32:
8090 case NEON::BI__builtin_neon_vdups_laneq_f32:
8091 Ops[0] = Builder.CreateBitCast(Ops[0],
8092 llvm::VectorType::get(FloatTy, 4));
8093 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8094 "vgetq_lane");
8095 case NEON::BI__builtin_neon_vgetq_lane_f64:
8096 case NEON::BI__builtin_neon_vdupd_laneq_f64:
8097 Ops[0] = Builder.CreateBitCast(Ops[0],
8098 llvm::VectorType::get(DoubleTy, 2));
8099 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8100 "vgetq_lane");
8101 case NEON::BI__builtin_neon_vaddh_f16:
8102 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8103 return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
8104 case NEON::BI__builtin_neon_vsubh_f16:
8105 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8106 return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
8107 case NEON::BI__builtin_neon_vmulh_f16:
8108 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8109 return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
8110 case NEON::BI__builtin_neon_vdivh_f16:
8111 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8112 return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
8113 case NEON::BI__builtin_neon_vfmah_f16: {
8114 Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
    // The NEON intrinsic puts the accumulator first, unlike LLVM's fma.
8116 return Builder.CreateCall(F,
8117 {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
8118 }
8119 case NEON::BI__builtin_neon_vfmsh_f16: {
8120 Function *F = CGM.getIntrinsic(Intrinsic::fma, HalfTy);
8121 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
    Value *Sub =
        Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
    // The NEON intrinsic puts the accumulator first, unlike LLVM's fma.
8124 return Builder.CreateCall(F, {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
8125 }
8126 case NEON::BI__builtin_neon_vaddd_s64:
8127 case NEON::BI__builtin_neon_vaddd_u64:
8128 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
8129 case NEON::BI__builtin_neon_vsubd_s64:
8130 case NEON::BI__builtin_neon_vsubd_u64:
8131 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
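  // There is no scalar form of the 16-bit saturating doubling multiply, so
  // widen the operands into v4i16 vectors, use the vector sqdmull, and take
  // lane 0 of the result before the saturating accumulate.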
8132 case NEON::BI__builtin_neon_vqdmlalh_s16:
8133 case NEON::BI__builtin_neon_vqdmlslh_s16: {
8134 SmallVector<Value *, 2> ProductOps;
8135 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
8136 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
8137 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
8138 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
8139 ProductOps, "vqdmlXl");
8140 Constant *CI = ConstantInt::get(SizeTy, 0);
8141 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
8142
8143 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
8144 ? Intrinsic::aarch64_neon_sqadd
8145 : Intrinsic::aarch64_neon_sqsub;
8146 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
8147 }
8148 case NEON::BI__builtin_neon_vqshlud_n_s64: {
8149 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8150 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
8151 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
8152 Ops, "vqshlu_n");
8153 }
8154 case NEON::BI__builtin_neon_vqshld_n_u64:
8155 case NEON::BI__builtin_neon_vqshld_n_s64: {
8156 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
8157 ? Intrinsic::aarch64_neon_uqshl
8158 : Intrinsic::aarch64_neon_sqshl;
8159 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8160 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
8161 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
8162 }
8163 case NEON::BI__builtin_neon_vrshrd_n_u64:
8164 case NEON::BI__builtin_neon_vrshrd_n_s64: {
8165 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
8166 ? Intrinsic::aarch64_neon_urshl
8167 : Intrinsic::aarch64_neon_srshl;
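    // There is no separate rounding-right-shift intrinsic; emit a rounding
    // left shift by the negated amount instead.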
8168 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8169 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
8170 Ops[1] = ConstantInt::get(Int64Ty, -SV);
8171 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
8172 }
8173 case NEON::BI__builtin_neon_vrsrad_n_u64:
8174 case NEON::BI__builtin_neon_vrsrad_n_s64: {
8175 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
8176 ? Intrinsic::aarch64_neon_urshl
8177 : Intrinsic::aarch64_neon_srshl;
8178 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
8179 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
8180 Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
8181 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
8182 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
8183 }
8184 case NEON::BI__builtin_neon_vshld_n_s64:
8185 case NEON::BI__builtin_neon_vshld_n_u64: {
8186 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
8187 return Builder.CreateShl(
8188 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
8189 }
8190 case NEON::BI__builtin_neon_vshrd_n_s64: {
8191 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
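    // A 64-bit shift by 64 is undefined in LLVM IR; clamp to 63, which gives
    // the same all-sign-bits result the instruction defines for a shift of
    // the full width.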
8192 return Builder.CreateAShr(
8193 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
8194 Amt->getZExtValue())),
8195 "shrd_n");
8196 }
8197 case NEON::BI__builtin_neon_vshrd_n_u64: {
8198 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
8199 uint64_t ShiftAmt = Amt->getZExtValue();
8200 // Right-shifting an unsigned value by its size yields 0.
8201 if (ShiftAmt == 64)
8202 return ConstantInt::get(Int64Ty, 0);
8203 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
8204 "shrd_n");
8205 }
8206 case NEON::BI__builtin_neon_vsrad_n_s64: {
8207 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
8208 Ops[1] = Builder.CreateAShr(
8209 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
8210 Amt->getZExtValue())),
8211 "shrd_n");
8212 return Builder.CreateAdd(Ops[0], Ops[1]);
8213 }
8214 case NEON::BI__builtin_neon_vsrad_n_u64: {
8215 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
8216 uint64_t ShiftAmt = Amt->getZExtValue();
8217 // Right-shifting an unsigned value by its size yields 0.
8218 // As Op + 0 = Op, return Ops[0] directly.
8219 if (ShiftAmt == 64)
8220 return Ops[0];
8221 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
8222 "shrd_n");
8223 return Builder.CreateAdd(Ops[0], Ops[1]);
8224 }
8225 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
8226 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
8227 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
8228 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
8229 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
8230 "lane");
8231 SmallVector<Value *, 2> ProductOps;
8232 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
8233 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
8234 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
8235 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
8236 ProductOps, "vqdmlXl");
8237 Constant *CI = ConstantInt::get(SizeTy, 0);
8238 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
8239 Ops.pop_back();
8240
8241 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
8242 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
8243 ? Intrinsic::aarch64_neon_sqadd
8244 : Intrinsic::aarch64_neon_sqsub;
8245 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
8246 }
8247 case NEON::BI__builtin_neon_vqdmlals_s32:
8248 case NEON::BI__builtin_neon_vqdmlsls_s32: {
8249 SmallVector<Value *, 2> ProductOps;
8250 ProductOps.push_back(Ops[1]);
8251 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
8252 Ops[1] =
8253 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
8254 ProductOps, "vqdmlXl");
8255
8256 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
8257 ? Intrinsic::aarch64_neon_sqadd
8258 : Intrinsic::aarch64_neon_sqsub;
8259 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
8260 }
8261 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
8262 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
8263 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
8264 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
8265 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
8266 "lane");
8267 SmallVector<Value *, 2> ProductOps;
8268 ProductOps.push_back(Ops[1]);
8269 ProductOps.push_back(Ops[2]);
8270 Ops[1] =
8271 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
8272 ProductOps, "vqdmlXl");
8273 Ops.pop_back();
8274
8275 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
8276 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
8277 ? Intrinsic::aarch64_neon_sqadd
8278 : Intrinsic::aarch64_neon_sqsub;
8279 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
8280 }
8281 case NEON::BI__builtin_neon_vduph_lane_f16: {
8282 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8283 "vget_lane");
8284 }
8285 case NEON::BI__builtin_neon_vduph_laneq_f16: {
8286 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
8287 "vgetq_lane");
8288 }
8289 }
8290
8291 llvm::VectorType *VTy = GetNeonType(this, Type);
8292 llvm::Type *Ty = VTy;
8293 if (!Ty)
8294 return nullptr;
8295
  // Not every intrinsic handled by the common case works for AArch64 yet, so
  // defer to the common code only if the builtin has been added to the
  // AArch64-specific map.
8298 Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
8299 AArch64SIMDIntrinsicsProvenSorted);
8300
8301 if (Builtin)
8302 return EmitCommonNeonBuiltinExpr(
8303 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
8304 Builtin->NameHint, Builtin->TypeModifier, E, Ops,
8305 /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
8306
8307 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
8308 return V;
8309
8310 unsigned Int;
8311 switch (BuiltinID) {
8312 default: return nullptr;
8313 case NEON::BI__builtin_neon_vbsl_v:
8314 case NEON::BI__builtin_neon_vbslq_v: {
8315 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
8316 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
8317 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
8318 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
8319
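    // Emit the bitwise select as (mask & b) | (~mask & c).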
8320 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
8321 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
8322 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
8323 return Builder.CreateBitCast(Ops[0], Ty);
8324 }
8325 case NEON::BI__builtin_neon_vfma_lane_v:
8326 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
8327 // The ARM builtins (and instructions) have the addend as the first
8328 // operand, but the 'fma' intrinsics have it last. Swap it around here.
8329 Value *Addend = Ops[0];
8330 Value *Multiplicand = Ops[1];
8331 Value *LaneSource = Ops[2];
8332 Ops[0] = Multiplicand;
8333 Ops[1] = LaneSource;
8334 Ops[2] = Addend;
8335
8336 // Now adjust things to handle the lane access.
8337 llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
8338 llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
8339 VTy;
8340 llvm::Constant *cst = cast<Constant>(Ops[3]);
8341 Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
8342 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
8343 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
8344
8345 Ops.pop_back();
8346 Int = Intrinsic::fma;
8347 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
8348 }
8349 case NEON::BI__builtin_neon_vfma_laneq_v: {
    llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
    // v1f64 fma is lowered to the Neon scalar f64 fma; cast<> asserts rather
    // than returning null, so VTy needs no null check here.
    if (VTy->getElementType() == DoubleTy) {
8353 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
8354 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
8355 llvm::Type *VTy = GetNeonType(this,
8356 NeonTypeFlags(NeonTypeFlags::Float64, false, true));
8357 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
8358 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
8359 Function *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
8360 Value *Result = Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
8361 return Builder.CreateBitCast(Result, Ty);
8362 }
8363 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
8364 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8365 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8366
8367 llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
8368 VTy->getNumElements() * 2);
8369 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
8370 Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
8371 cast<ConstantInt>(Ops[3]));
8372 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
8373
8374 return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
8375 }
8376 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
8377 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
8378 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8379 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8380
8381 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
8382 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
8383 return Builder.CreateCall(F, {Ops[2], Ops[1], Ops[0]});
8384 }
8385 case NEON::BI__builtin_neon_vfmah_lane_f16:
8386 case NEON::BI__builtin_neon_vfmas_lane_f32:
8387 case NEON::BI__builtin_neon_vfmah_laneq_f16:
8388 case NEON::BI__builtin_neon_vfmas_laneq_f32:
8389 case NEON::BI__builtin_neon_vfmad_lane_f64:
8390 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
8391 Ops.push_back(EmitScalarExpr(E->getArg(3)));
8392 llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
8393 Function *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
8394 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
8395 return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0]});
8396 }
8397 case NEON::BI__builtin_neon_vmull_v:
8398 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8399 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
8400 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
8401 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
8402 case NEON::BI__builtin_neon_vmax_v:
8403 case NEON::BI__builtin_neon_vmaxq_v:
8404 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8405 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
8406 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
8407 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
8408 case NEON::BI__builtin_neon_vmaxh_f16: {
8409 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8410 Int = Intrinsic::aarch64_neon_fmax;
8411 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
8412 }
8413 case NEON::BI__builtin_neon_vmin_v:
8414 case NEON::BI__builtin_neon_vminq_v:
8415 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8416 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
8417 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
8418 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
8419 case NEON::BI__builtin_neon_vminh_f16: {
8420 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8421 Int = Intrinsic::aarch64_neon_fmin;
8422 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
8423 }
8424 case NEON::BI__builtin_neon_vabd_v:
8425 case NEON::BI__builtin_neon_vabdq_v:
8426 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8427 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
8428 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
8429 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
8430 case NEON::BI__builtin_neon_vpadal_v:
8431 case NEON::BI__builtin_neon_vpadalq_v: {
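    // vpadal (pairwise add-long and accumulate) has no single intrinsic
    // here, so emit [su]addlp on the source and add the accumulator to the
    // result.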
8432 unsigned ArgElts = VTy->getNumElements();
8433 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
8434 unsigned BitWidth = EltTy->getBitWidth();
8435 llvm::Type *ArgTy = llvm::VectorType::get(
8436 llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
8437 llvm::Type* Tys[2] = { VTy, ArgTy };
8438 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
8439 SmallVector<llvm::Value*, 1> TmpOps;
8440 TmpOps.push_back(Ops[1]);
8441 Function *F = CGM.getIntrinsic(Int, Tys);
8442 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
8443 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
8444 return Builder.CreateAdd(tmp, addend);
8445 }
8446 case NEON::BI__builtin_neon_vpmin_v:
8447 case NEON::BI__builtin_neon_vpminq_v:
8448 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8449 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
8450 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
8451 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
8452 case NEON::BI__builtin_neon_vpmax_v:
8453 case NEON::BI__builtin_neon_vpmaxq_v:
8454 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
8455 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
8456 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
8457 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
8458 case NEON::BI__builtin_neon_vminnm_v:
8459 case NEON::BI__builtin_neon_vminnmq_v:
8460 Int = Intrinsic::aarch64_neon_fminnm;
8461 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
8462 case NEON::BI__builtin_neon_vminnmh_f16:
8463 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8464 Int = Intrinsic::aarch64_neon_fminnm;
8465 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
8466 case NEON::BI__builtin_neon_vmaxnm_v:
8467 case NEON::BI__builtin_neon_vmaxnmq_v:
8468 Int = Intrinsic::aarch64_neon_fmaxnm;
8469 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
8470 case NEON::BI__builtin_neon_vmaxnmh_f16:
8471 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8472 Int = Intrinsic::aarch64_neon_fmaxnm;
8473 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
8474 case NEON::BI__builtin_neon_vrecpss_f32: {
8475 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8476 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
8477 Ops, "vrecps");
8478 }
8479 case NEON::BI__builtin_neon_vrecpsd_f64:
8480 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8481 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
8482 Ops, "vrecps");
8483 case NEON::BI__builtin_neon_vrecpsh_f16:
8484 Ops.push_back(EmitScalarExpr(E->getArg(1)));
8485 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
8486 Ops, "vrecps");
8487 case NEON::BI__builtin_neon_vqshrun_n_v:
8488 Int = Intrinsic::aarch64_neon_sqshrun;
8489 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
8490 case NEON::BI__builtin_neon_vqrshrun_n_v:
8491 Int = Intrinsic::aarch64_neon_sqrshrun;
8492 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
8493 case NEON::BI__builtin_neon_vqshrn_n_v:
8494 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
8495 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
8496 case NEON::BI__builtin_neon_vrshrn_n_v:
8497 Int = Intrinsic::aarch64_neon_rshrn;
8498 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
8499 case NEON::BI__builtin_neon_vqrshrn_n_v:
8500 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
8501 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
8502 case NEON::BI__builtin_neon_vrndah_f16: {
8503 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8504 Int = Intrinsic::round;
8505 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
8506 }
8507 case NEON::BI__builtin_neon_vrnda_v:
8508 case NEON::BI__builtin_neon_vrndaq_v: {
8509 Int = Intrinsic::round;
8510 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
8511 }
8512 case NEON::BI__builtin_neon_vrndih_f16: {
8513 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8514 Int = Intrinsic::nearbyint;
8515 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
8516 }
8517 case NEON::BI__builtin_neon_vrndmh_f16: {
8518 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8519 Int = Intrinsic::floor;
8520 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
8521 }
8522 case NEON::BI__builtin_neon_vrndm_v:
8523 case NEON::BI__builtin_neon_vrndmq_v: {
8524 Int = Intrinsic::floor;
8525 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
8526 }
8527 case NEON::BI__builtin_neon_vrndnh_f16: {
8528 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8529 Int = Intrinsic::aarch64_neon_frintn;
8530 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
8531 }
8532 case NEON::BI__builtin_neon_vrndn_v:
8533 case NEON::BI__builtin_neon_vrndnq_v: {
8534 Int = Intrinsic::aarch64_neon_frintn;
8535 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
8536 }
8537 case NEON::BI__builtin_neon_vrndns_f32: {
8538 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8539 Int = Intrinsic::aarch64_neon_frintn;
8540 return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
8541 }
8542 case NEON::BI__builtin_neon_vrndph_f16: {
8543 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8544 Int = Intrinsic::ceil;
8545 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
8546 }
8547 case NEON::BI__builtin_neon_vrndp_v:
8548 case NEON::BI__builtin_neon_vrndpq_v: {
8549 Int = Intrinsic::ceil;
8550 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
8551 }
8552 case NEON::BI__builtin_neon_vrndxh_f16: {
8553 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8554 Int = Intrinsic::rint;
8555 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
8556 }
8557 case NEON::BI__builtin_neon_vrndx_v:
8558 case NEON::BI__builtin_neon_vrndxq_v: {
8559 Int = Intrinsic::rint;
8560 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
8561 }
8562 case NEON::BI__builtin_neon_vrndh_f16: {
8563 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8564 Int = Intrinsic::trunc;
8565 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
8566 }
8567 case NEON::BI__builtin_neon_vrnd_v:
8568 case NEON::BI__builtin_neon_vrndq_v: {
8569 Int = Intrinsic::trunc;
8570 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
8571 }
8572 case NEON::BI__builtin_neon_vcvt_f64_v:
8573 case NEON::BI__builtin_neon_vcvtq_f64_v:
8574 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8575 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
8576 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
8577 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
8578 case NEON::BI__builtin_neon_vcvt_f64_f32: {
8579 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
8580 "unexpected vcvt_f64_f32 builtin");
8581 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
8582 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
8583
8584 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
8585 }
8586 case NEON::BI__builtin_neon_vcvt_f32_f64: {
8587 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
8588 "unexpected vcvt_f32_f64 builtin");
8589 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
8590 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
8591
8592 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
8593 }
8594 case NEON::BI__builtin_neon_vcvt_s32_v:
8595 case NEON::BI__builtin_neon_vcvt_u32_v:
8596 case NEON::BI__builtin_neon_vcvt_s64_v:
8597 case NEON::BI__builtin_neon_vcvt_u64_v:
8598 case NEON::BI__builtin_neon_vcvt_s16_v:
8599 case NEON::BI__builtin_neon_vcvt_u16_v:
8600 case NEON::BI__builtin_neon_vcvtq_s32_v:
8601 case NEON::BI__builtin_neon_vcvtq_u32_v:
8602 case NEON::BI__builtin_neon_vcvtq_s64_v:
8603 case NEON::BI__builtin_neon_vcvtq_u64_v:
8604 case NEON::BI__builtin_neon_vcvtq_s16_v:
8605 case NEON::BI__builtin_neon_vcvtq_u16_v: {
8606 Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
8607 if (usgn)
8608 return Builder.CreateFPToUI(Ops[0], Ty);
8609 return Builder.CreateFPToSI(Ops[0], Ty);
8610 }
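  // The vcvt{a,m,n,p} families below convert FP to integer with an explicit
  // rounding mode: to nearest with ties away, toward minus infinity, to
  // nearest with ties to even, and toward plus infinity, respectively.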
8611 case NEON::BI__builtin_neon_vcvta_s16_v:
8612 case NEON::BI__builtin_neon_vcvta_u16_v:
8613 case NEON::BI__builtin_neon_vcvta_s32_v:
8614 case NEON::BI__builtin_neon_vcvtaq_s16_v:
8615 case NEON::BI__builtin_neon_vcvtaq_s32_v:
8616 case NEON::BI__builtin_neon_vcvta_u32_v:
8617 case NEON::BI__builtin_neon_vcvtaq_u16_v:
8618 case NEON::BI__builtin_neon_vcvtaq_u32_v:
8619 case NEON::BI__builtin_neon_vcvta_s64_v:
8620 case NEON::BI__builtin_neon_vcvtaq_s64_v:
8621 case NEON::BI__builtin_neon_vcvta_u64_v:
8622 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
8623 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
8624 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
8625 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
8626 }
8627 case NEON::BI__builtin_neon_vcvtm_s16_v:
8628 case NEON::BI__builtin_neon_vcvtm_s32_v:
8629 case NEON::BI__builtin_neon_vcvtmq_s16_v:
8630 case NEON::BI__builtin_neon_vcvtmq_s32_v:
8631 case NEON::BI__builtin_neon_vcvtm_u16_v:
8632 case NEON::BI__builtin_neon_vcvtm_u32_v:
8633 case NEON::BI__builtin_neon_vcvtmq_u16_v:
8634 case NEON::BI__builtin_neon_vcvtmq_u32_v:
8635 case NEON::BI__builtin_neon_vcvtm_s64_v:
8636 case NEON::BI__builtin_neon_vcvtmq_s64_v:
8637 case NEON::BI__builtin_neon_vcvtm_u64_v:
8638 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
8639 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
8640 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
8641 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
8642 }
8643 case NEON::BI__builtin_neon_vcvtn_s16_v:
8644 case NEON::BI__builtin_neon_vcvtn_s32_v:
8645 case NEON::BI__builtin_neon_vcvtnq_s16_v:
8646 case NEON::BI__builtin_neon_vcvtnq_s32_v:
8647 case NEON::BI__builtin_neon_vcvtn_u16_v:
8648 case NEON::BI__builtin_neon_vcvtn_u32_v:
8649 case NEON::BI__builtin_neon_vcvtnq_u16_v:
8650 case NEON::BI__builtin_neon_vcvtnq_u32_v:
8651 case NEON::BI__builtin_neon_vcvtn_s64_v:
8652 case NEON::BI__builtin_neon_vcvtnq_s64_v:
8653 case NEON::BI__builtin_neon_vcvtn_u64_v:
8654 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
8655 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
8656 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
8657 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
8658 }
8659 case NEON::BI__builtin_neon_vcvtp_s16_v:
8660 case NEON::BI__builtin_neon_vcvtp_s32_v:
8661 case NEON::BI__builtin_neon_vcvtpq_s16_v:
8662 case NEON::BI__builtin_neon_vcvtpq_s32_v:
8663 case NEON::BI__builtin_neon_vcvtp_u16_v:
8664 case NEON::BI__builtin_neon_vcvtp_u32_v:
8665 case NEON::BI__builtin_neon_vcvtpq_u16_v:
8666 case NEON::BI__builtin_neon_vcvtpq_u32_v:
8667 case NEON::BI__builtin_neon_vcvtp_s64_v:
8668 case NEON::BI__builtin_neon_vcvtpq_s64_v:
8669 case NEON::BI__builtin_neon_vcvtp_u64_v:
8670 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
8671 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
8672 llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
8673 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
8674 }
8675 case NEON::BI__builtin_neon_vmulx_v:
8676 case NEON::BI__builtin_neon_vmulxq_v: {
8677 Int = Intrinsic::aarch64_neon_fmulx;
8678 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
8679 }
8680 case NEON::BI__builtin_neon_vmulxh_lane_f16:
8681 case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
    // vmulx_lane is lowered to the Neon scalar fmulx after extracting the
    // lane element.
8684 Ops.push_back(EmitScalarExpr(E->getArg(2)));
8685 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
8686 Ops.pop_back();
8687 Int = Intrinsic::aarch64_neon_fmulx;
8688 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
8689 }
8690 case NEON::BI__builtin_neon_vmul_lane_v:
8691 case NEON::BI__builtin_neon_vmul_laneq_v: {
    // v1f64 vmul_lane is lowered to a Neon scalar multiply of the extracted
    // lane element.
8693 bool Quad = false;
8694 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
8695 Quad = true;
8696 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
8697 llvm::Type *VTy = GetNeonType(this,
8698 NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
8699 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
8700 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
8701 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
8702 return Builder.CreateBitCast(Result, Ty);
8703 }
8704 case NEON::BI__builtin_neon_vnegd_s64:
8705 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
8706 case NEON::BI__builtin_neon_vnegh_f16:
8707 return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
8708 case NEON::BI__builtin_neon_vpmaxnm_v:
8709 case NEON::BI__builtin_neon_vpmaxnmq_v: {
8710 Int = Intrinsic::aarch64_neon_fmaxnmp;
8711 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
8712 }
8713 case NEON::BI__builtin_neon_vpminnm_v:
8714 case NEON::BI__builtin_neon_vpminnmq_v: {
8715 Int = Intrinsic::aarch64_neon_fminnmp;
8716 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
8717 }
8718 case NEON::BI__builtin_neon_vsqrth_f16: {
8719 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8720 Int = Intrinsic::sqrt;
8721 return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
8722 }
8723 case NEON::BI__builtin_neon_vsqrt_v:
8724 case NEON::BI__builtin_neon_vsqrtq_v: {
8725 Int = Intrinsic::sqrt;
8726 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8727 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
8728 }
8729 case NEON::BI__builtin_neon_vrbit_v:
8730 case NEON::BI__builtin_neon_vrbitq_v: {
8731 Int = Intrinsic::aarch64_neon_rbit;
8732 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
8733 }
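  // The across-vector reductions below use intrinsics that return an i32
  // regardless of the element width; the result is truncated back to the
  // element type where needed.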
8734 case NEON::BI__builtin_neon_vaddv_u8:
8735 // FIXME: These are handled by the AArch64 scalar code.
8736 usgn = true;
8737 LLVM_FALLTHROUGH;
8738 case NEON::BI__builtin_neon_vaddv_s8: {
8739 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
8740 Ty = Int32Ty;
8741 VTy = llvm::VectorType::get(Int8Ty, 8);
8742 llvm::Type *Tys[2] = { Ty, VTy };
8743 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8744 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
8745 return Builder.CreateTrunc(Ops[0], Int8Ty);
8746 }
8747 case NEON::BI__builtin_neon_vaddv_u16:
8748 usgn = true;
8749 LLVM_FALLTHROUGH;
8750 case NEON::BI__builtin_neon_vaddv_s16: {
8751 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
8752 Ty = Int32Ty;
8753 VTy = llvm::VectorType::get(Int16Ty, 4);
8754 llvm::Type *Tys[2] = { Ty, VTy };
8755 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8756 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
8757 return Builder.CreateTrunc(Ops[0], Int16Ty);
8758 }
8759 case NEON::BI__builtin_neon_vaddvq_u8:
8760 usgn = true;
8761 LLVM_FALLTHROUGH;
8762 case NEON::BI__builtin_neon_vaddvq_s8: {
8763 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
8764 Ty = Int32Ty;
8765 VTy = llvm::VectorType::get(Int8Ty, 16);
8766 llvm::Type *Tys[2] = { Ty, VTy };
8767 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8768 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
8769 return Builder.CreateTrunc(Ops[0], Int8Ty);
8770 }
8771 case NEON::BI__builtin_neon_vaddvq_u16:
8772 usgn = true;
8773 LLVM_FALLTHROUGH;
8774 case NEON::BI__builtin_neon_vaddvq_s16: {
8775 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
8776 Ty = Int32Ty;
8777 VTy = llvm::VectorType::get(Int16Ty, 8);
8778 llvm::Type *Tys[2] = { Ty, VTy };
8779 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8780 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
8781 return Builder.CreateTrunc(Ops[0], Int16Ty);
8782 }
8783 case NEON::BI__builtin_neon_vmaxv_u8: {
8784 Int = Intrinsic::aarch64_neon_umaxv;
8785 Ty = Int32Ty;
8786 VTy = llvm::VectorType::get(Int8Ty, 8);
8787 llvm::Type *Tys[2] = { Ty, VTy };
8788 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8789 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8790 return Builder.CreateTrunc(Ops[0], Int8Ty);
8791 }
8792 case NEON::BI__builtin_neon_vmaxv_u16: {
8793 Int = Intrinsic::aarch64_neon_umaxv;
8794 Ty = Int32Ty;
8795 VTy = llvm::VectorType::get(Int16Ty, 4);
8796 llvm::Type *Tys[2] = { Ty, VTy };
8797 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8798 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8799 return Builder.CreateTrunc(Ops[0], Int16Ty);
8800 }
8801 case NEON::BI__builtin_neon_vmaxvq_u8: {
8802 Int = Intrinsic::aarch64_neon_umaxv;
8803 Ty = Int32Ty;
8804 VTy = llvm::VectorType::get(Int8Ty, 16);
8805 llvm::Type *Tys[2] = { Ty, VTy };
8806 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8807 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8808 return Builder.CreateTrunc(Ops[0], Int8Ty);
8809 }
8810 case NEON::BI__builtin_neon_vmaxvq_u16: {
8811 Int = Intrinsic::aarch64_neon_umaxv;
8812 Ty = Int32Ty;
8813 VTy = llvm::VectorType::get(Int16Ty, 8);
8814 llvm::Type *Tys[2] = { Ty, VTy };
8815 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8816 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8817 return Builder.CreateTrunc(Ops[0], Int16Ty);
8818 }
8819 case NEON::BI__builtin_neon_vmaxv_s8: {
8820 Int = Intrinsic::aarch64_neon_smaxv;
8821 Ty = Int32Ty;
8822 VTy = llvm::VectorType::get(Int8Ty, 8);
8823 llvm::Type *Tys[2] = { Ty, VTy };
8824 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8825 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8826 return Builder.CreateTrunc(Ops[0], Int8Ty);
8827 }
8828 case NEON::BI__builtin_neon_vmaxv_s16: {
8829 Int = Intrinsic::aarch64_neon_smaxv;
8830 Ty = Int32Ty;
8831 VTy = llvm::VectorType::get(Int16Ty, 4);
8832 llvm::Type *Tys[2] = { Ty, VTy };
8833 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8834 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8835 return Builder.CreateTrunc(Ops[0], Int16Ty);
8836 }
8837 case NEON::BI__builtin_neon_vmaxvq_s8: {
8838 Int = Intrinsic::aarch64_neon_smaxv;
8839 Ty = Int32Ty;
8840 VTy = llvm::VectorType::get(Int8Ty, 16);
8841 llvm::Type *Tys[2] = { Ty, VTy };
8842 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8843 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8844 return Builder.CreateTrunc(Ops[0], Int8Ty);
8845 }
8846 case NEON::BI__builtin_neon_vmaxvq_s16: {
8847 Int = Intrinsic::aarch64_neon_smaxv;
8848 Ty = Int32Ty;
8849 VTy = llvm::VectorType::get(Int16Ty, 8);
8850 llvm::Type *Tys[2] = { Ty, VTy };
8851 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8852 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8853 return Builder.CreateTrunc(Ops[0], Int16Ty);
8854 }
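  // For the f16 reductions the intrinsic already returns an f16, so the
  // CreateTrunc below is a no-op: IRBuilder folds a cast to the same type
  // away and returns the value unchanged.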
8855 case NEON::BI__builtin_neon_vmaxv_f16: {
8856 Int = Intrinsic::aarch64_neon_fmaxv;
8857 Ty = HalfTy;
8858 VTy = llvm::VectorType::get(HalfTy, 4);
8859 llvm::Type *Tys[2] = { Ty, VTy };
8860 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8861 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8862 return Builder.CreateTrunc(Ops[0], HalfTy);
8863 }
8864 case NEON::BI__builtin_neon_vmaxvq_f16: {
8865 Int = Intrinsic::aarch64_neon_fmaxv;
8866 Ty = HalfTy;
8867 VTy = llvm::VectorType::get(HalfTy, 8);
8868 llvm::Type *Tys[2] = { Ty, VTy };
8869 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8870 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
8871 return Builder.CreateTrunc(Ops[0], HalfTy);
8872 }
8873 case NEON::BI__builtin_neon_vminv_u8: {
8874 Int = Intrinsic::aarch64_neon_uminv;
8875 Ty = Int32Ty;
8876 VTy = llvm::VectorType::get(Int8Ty, 8);
8877 llvm::Type *Tys[2] = { Ty, VTy };
8878 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8879 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8880 return Builder.CreateTrunc(Ops[0], Int8Ty);
8881 }
8882 case NEON::BI__builtin_neon_vminv_u16: {
8883 Int = Intrinsic::aarch64_neon_uminv;
8884 Ty = Int32Ty;
8885 VTy = llvm::VectorType::get(Int16Ty, 4);
8886 llvm::Type *Tys[2] = { Ty, VTy };
8887 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8888 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8889 return Builder.CreateTrunc(Ops[0], Int16Ty);
8890 }
8891 case NEON::BI__builtin_neon_vminvq_u8: {
8892 Int = Intrinsic::aarch64_neon_uminv;
8893 Ty = Int32Ty;
8894 VTy = llvm::VectorType::get(Int8Ty, 16);
8895 llvm::Type *Tys[2] = { Ty, VTy };
8896 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8897 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8898 return Builder.CreateTrunc(Ops[0], Int8Ty);
8899 }
8900 case NEON::BI__builtin_neon_vminvq_u16: {
8901 Int = Intrinsic::aarch64_neon_uminv;
8902 Ty = Int32Ty;
8903 VTy = llvm::VectorType::get(Int16Ty, 8);
8904 llvm::Type *Tys[2] = { Ty, VTy };
8905 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8906 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8907 return Builder.CreateTrunc(Ops[0], Int16Ty);
8908 }
8909 case NEON::BI__builtin_neon_vminv_s8: {
8910 Int = Intrinsic::aarch64_neon_sminv;
8911 Ty = Int32Ty;
8912 VTy = llvm::VectorType::get(Int8Ty, 8);
8913 llvm::Type *Tys[2] = { Ty, VTy };
8914 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8915 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8916 return Builder.CreateTrunc(Ops[0], Int8Ty);
8917 }
8918 case NEON::BI__builtin_neon_vminv_s16: {
8919 Int = Intrinsic::aarch64_neon_sminv;
8920 Ty = Int32Ty;
8921 VTy = llvm::VectorType::get(Int16Ty, 4);
8922 llvm::Type *Tys[2] = { Ty, VTy };
8923 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8924 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8925 return Builder.CreateTrunc(Ops[0], Int16Ty);
8926 }
8927 case NEON::BI__builtin_neon_vminvq_s8: {
8928 Int = Intrinsic::aarch64_neon_sminv;
8929 Ty = Int32Ty;
8930 VTy = llvm::VectorType::get(Int8Ty, 16);
8931 llvm::Type *Tys[2] = { Ty, VTy };
8932 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8933 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8934 return Builder.CreateTrunc(Ops[0], Int8Ty);
8935 }
8936 case NEON::BI__builtin_neon_vminvq_s16: {
8937 Int = Intrinsic::aarch64_neon_sminv;
8938 Ty = Int32Ty;
8939 VTy = llvm::VectorType::get(Int16Ty, 8);
8940 llvm::Type *Tys[2] = { Ty, VTy };
8941 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8942 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8943 return Builder.CreateTrunc(Ops[0], Int16Ty);
8944 }
8945 case NEON::BI__builtin_neon_vminv_f16: {
8946 Int = Intrinsic::aarch64_neon_fminv;
8947 Ty = HalfTy;
8948 VTy = llvm::VectorType::get(HalfTy, 4);
8949 llvm::Type *Tys[2] = { Ty, VTy };
8950 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8951 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8952 return Builder.CreateTrunc(Ops[0], HalfTy);
8953 }
8954 case NEON::BI__builtin_neon_vminvq_f16: {
8955 Int = Intrinsic::aarch64_neon_fminv;
8956 Ty = HalfTy;
8957 VTy = llvm::VectorType::get(HalfTy, 8);
8958 llvm::Type *Tys[2] = { Ty, VTy };
8959 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8960 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
8961 return Builder.CreateTrunc(Ops[0], HalfTy);
8962 }
8963 case NEON::BI__builtin_neon_vmaxnmv_f16: {
8964 Int = Intrinsic::aarch64_neon_fmaxnmv;
8965 Ty = HalfTy;
8966 VTy = llvm::VectorType::get(HalfTy, 4);
8967 llvm::Type *Tys[2] = { Ty, VTy };
8968 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8969 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
8970 return Builder.CreateTrunc(Ops[0], HalfTy);
8971 }
8972 case NEON::BI__builtin_neon_vmaxnmvq_f16: {
8973 Int = Intrinsic::aarch64_neon_fmaxnmv;
8974 Ty = HalfTy;
8975 VTy = llvm::VectorType::get(HalfTy, 8);
8976 llvm::Type *Tys[2] = { Ty, VTy };
8977 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8978 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
8979 return Builder.CreateTrunc(Ops[0], HalfTy);
8980 }
8981 case NEON::BI__builtin_neon_vminnmv_f16: {
8982 Int = Intrinsic::aarch64_neon_fminnmv;
8983 Ty = HalfTy;
8984 VTy = llvm::VectorType::get(HalfTy, 4);
8985 llvm::Type *Tys[2] = { Ty, VTy };
8986 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8987 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
8988 return Builder.CreateTrunc(Ops[0], HalfTy);
8989 }
8990 case NEON::BI__builtin_neon_vminnmvq_f16: {
8991 Int = Intrinsic::aarch64_neon_fminnmv;
8992 Ty = HalfTy;
8993 VTy = llvm::VectorType::get(HalfTy, 8);
8994 llvm::Type *Tys[2] = { Ty, VTy };
8995 Ops.push_back(EmitScalarExpr(E->getArg(0)));
8996 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
8997 return Builder.CreateTrunc(Ops[0], HalfTy);
8998 }
8999 case NEON::BI__builtin_neon_vmul_n_f64: {
9000 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
9001 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
9002 return Builder.CreateFMul(Ops[0], RHS);
9003 }
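  // vaddlv widens while it reduces: the 8-bit forms produce a 16-bit sum
  // (truncated from the intrinsic's i32 result), while the 16-bit forms
  // return the i32 result directly.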
9004 case NEON::BI__builtin_neon_vaddlv_u8: {
9005 Int = Intrinsic::aarch64_neon_uaddlv;
9006 Ty = Int32Ty;
9007 VTy = llvm::VectorType::get(Int8Ty, 8);
9008 llvm::Type *Tys[2] = { Ty, VTy };
9009 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9010 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9011 return Builder.CreateTrunc(Ops[0], Int16Ty);
9012 }
9013 case NEON::BI__builtin_neon_vaddlv_u16: {
9014 Int = Intrinsic::aarch64_neon_uaddlv;
9015 Ty = Int32Ty;
9016 VTy = llvm::VectorType::get(Int16Ty, 4);
9017 llvm::Type *Tys[2] = { Ty, VTy };
9018 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9019 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9020 }
9021 case NEON::BI__builtin_neon_vaddlvq_u8: {
9022 Int = Intrinsic::aarch64_neon_uaddlv;
9023 Ty = Int32Ty;
9024 VTy = llvm::VectorType::get(Int8Ty, 16);
9025 llvm::Type *Tys[2] = { Ty, VTy };
9026 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9027 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9028 return Builder.CreateTrunc(Ops[0], Int16Ty);
9029 }
9030 case NEON::BI__builtin_neon_vaddlvq_u16: {
9031 Int = Intrinsic::aarch64_neon_uaddlv;
9032 Ty = Int32Ty;
9033 VTy = llvm::VectorType::get(Int16Ty, 8);
9034 llvm::Type *Tys[2] = { Ty, VTy };
9035 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9036 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9037 }
9038 case NEON::BI__builtin_neon_vaddlv_s8: {
9039 Int = Intrinsic::aarch64_neon_saddlv;
9040 Ty = Int32Ty;
9041 VTy = llvm::VectorType::get(Int8Ty, 8);
9042 llvm::Type *Tys[2] = { Ty, VTy };
9043 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9044 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9045 return Builder.CreateTrunc(Ops[0], Int16Ty);
9046 }
9047 case NEON::BI__builtin_neon_vaddlv_s16: {
9048 Int = Intrinsic::aarch64_neon_saddlv;
9049 Ty = Int32Ty;
9050 VTy = llvm::VectorType::get(Int16Ty, 4);
9051 llvm::Type *Tys[2] = { Ty, VTy };
9052 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9053 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9054 }
9055 case NEON::BI__builtin_neon_vaddlvq_s8: {
9056 Int = Intrinsic::aarch64_neon_saddlv;
9057 Ty = Int32Ty;
9058 VTy = llvm::VectorType::get(Int8Ty, 16);
9059 llvm::Type *Tys[2] = { Ty, VTy };
9060 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9061 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9062 return Builder.CreateTrunc(Ops[0], Int16Ty);
9063 }
9064 case NEON::BI__builtin_neon_vaddlvq_s16: {
9065 Int = Intrinsic::aarch64_neon_saddlv;
9066 Ty = Int32Ty;
9067 VTy = llvm::VectorType::get(Int16Ty, 8);
9068 llvm::Type *Tys[2] = { Ty, VTy };
9069 Ops.push_back(EmitScalarExpr(E->getArg(0)));
9070 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
9071 }
9072 case NEON::BI__builtin_neon_vsri_n_v:
9073 case NEON::BI__builtin_neon_vsriq_n_v: {
9074 Int = Intrinsic::aarch64_neon_vsri;
9075 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
9076 return EmitNeonCall(Intrin, Ops, "vsri_n");
9077 }
9078 case NEON::BI__builtin_neon_vsli_n_v:
9079 case NEON::BI__builtin_neon_vsliq_n_v: {
9080 Int = Intrinsic::aarch64_neon_vsli;
9081 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
9082 return EmitNeonCall(Intrin, Ops, "vsli_n");
9083 }
9084 case NEON::BI__builtin_neon_vsra_n_v:
9085 case NEON::BI__builtin_neon_vsraq_n_v:
9086 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9087 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
9088 return Builder.CreateAdd(Ops[0], Ops[1]);
9089 case NEON::BI__builtin_neon_vrsra_n_v:
9090 case NEON::BI__builtin_neon_vrsraq_n_v: {
9091 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
9092 SmallVector<llvm::Value*,2> TmpOps;
9093 TmpOps.push_back(Ops[1]);
9094 TmpOps.push_back(Ops[2]);
9095 Function* F = CGM.getIntrinsic(Int, Ty);
9096 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
9097 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
9098 return Builder.CreateAdd(Ops[0], tmp);
9099 }
9100 case NEON::BI__builtin_neon_vld1_v:
9101 case NEON::BI__builtin_neon_vld1q_v: {
9102 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::get(VTy, DefaultAS));
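    // Assume the natural alignment of the full 64-bit (D) or 128-bit (Q)
    // register being loaded.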
9103 auto Alignment = CharUnits::fromQuantity(
9104 BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
9105 return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
9106 }
9107 case NEON::BI__builtin_neon_vst1_v:
9108 case NEON::BI__builtin_neon_vst1q_v:
9109 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::get(VTy, DefaultAS));
9110 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
9111 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9112 case NEON::BI__builtin_neon_vld1_lane_v:
9113 case NEON::BI__builtin_neon_vld1q_lane_v: {
9114 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9115 Ty = llvm::PointerType::get(VTy->getElementType(), DefaultAS);
9116 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9117 auto Alignment = CharUnits::fromQuantity(
9118 BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
9119 Ops[0] =
9120 Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
9121 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
9122 }
9123 case NEON::BI__builtin_neon_vld1_dup_v:
9124 case NEON::BI__builtin_neon_vld1q_dup_v: {
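    // Load the scalar, insert it into lane 0 of an undef vector, then splat
    // it across all lanes.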
9125 Value *V = UndefValue::get(Ty);
9126 Ty = llvm::PointerType::get(VTy->getElementType(), DefaultAS);
9127 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9128 auto Alignment = CharUnits::fromQuantity(
9129 BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
9130 Ops[0] =
9131 Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
9132 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
9133 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
9134 return EmitNeonSplat(Ops[0], CI);
9135 }
9136 case NEON::BI__builtin_neon_vst1_lane_v:
9137 case NEON::BI__builtin_neon_vst1q_lane_v:
9138 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9139 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
9140 Ty = llvm::PointerType::get(Ops[1]->getType(), DefaultAS);
9141 return Builder.CreateDefaultAlignedStore(Ops[1],
9142 Builder.CreateBitCast(Ops[0], Ty));
9143 case NEON::BI__builtin_neon_vld2_v:
9144 case NEON::BI__builtin_neon_vld2q_v: {
9145 llvm::Type *PTy = llvm::PointerType::get(VTy, DefaultAS);
9146 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
9147 llvm::Type *Tys[2] = { VTy, PTy };
9148 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
9149 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
9150 Ops[0] = Builder.CreateBitCast(Ops[0],
9151 CGM.getPointerInDefaultAS(Ops[1]->getType()));
9152 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9153 }
9154 case NEON::BI__builtin_neon_vld3_v:
9155 case NEON::BI__builtin_neon_vld3q_v: {
9156 llvm::Type *PTy = llvm::PointerType::get(VTy, DefaultAS);
9157 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
9158 llvm::Type *Tys[2] = { VTy, PTy };
9159 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
9160 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
9161 Ops[0] = Builder.CreateBitCast(Ops[0],
9162 CGM.getPointerInDefaultAS(Ops[1]->getType()));
9163 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9164 }
9165 case NEON::BI__builtin_neon_vld4_v:
9166 case NEON::BI__builtin_neon_vld4q_v: {
9167 llvm::Type *PTy = llvm::PointerType::get(VTy, DefaultAS);
9168 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
9169 llvm::Type *Tys[2] = { VTy, PTy };
9170 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
9171 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
9172 Ops[0] = Builder.CreateBitCast(Ops[0],
9173 llvm::PointerType::get(Ops[1]->getType(), DefaultAS));
9174 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9175 }
9176 case NEON::BI__builtin_neon_vld2_dup_v:
9177 case NEON::BI__builtin_neon_vld2q_dup_v: {
9178 llvm::Type *PTy =
9179 llvm::PointerType::get(VTy->getElementType(), DefaultAS);
9180 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
9181 llvm::Type *Tys[2] = { VTy, PTy };
9182 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
9183 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
9184 Ops[0] = Builder.CreateBitCast(Ops[0],
9185 llvm::PointerType::get(Ops[1]->getType(), DefaultAS));
9186 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9187 }
9188 case NEON::BI__builtin_neon_vld3_dup_v:
9189 case NEON::BI__builtin_neon_vld3q_dup_v: {
9190 llvm::Type *PTy =
9191 llvm::PointerType::get(VTy->getElementType(), DefaultAS);
9192 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
9193 llvm::Type *Tys[2] = { VTy, PTy };
9194 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
9195 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
9196 Ops[0] = Builder.CreateBitCast(Ops[0],
9197 llvm::PointerType::get(Ops[1]->getType(), DefaultAS));
9198 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9199 }
9200 case NEON::BI__builtin_neon_vld4_dup_v:
9201 case NEON::BI__builtin_neon_vld4q_dup_v: {
9202 llvm::Type *PTy =
9203 llvm::PointerType::get(VTy->getElementType(), DefaultAS);
9204 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
9205 llvm::Type *Tys[2] = { VTy, PTy };
9206 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
9207 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
9208 Ops[0] = Builder.CreateBitCast(Ops[0],
9209 llvm::PointerType::get(Ops[1]->getType(), DefaultAS));
9210 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9211 }
9212 case NEON::BI__builtin_neon_vld2_lane_v:
9213 case NEON::BI__builtin_neon_vld2q_lane_v: {
9214 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
9215 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
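    // Rotate the source pointer from Ops[1] to the end so the operands match
    // the ld2lane intrinsic: data vectors first, then the lane index, then
    // the address.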
9216 Ops.push_back(Ops[1]);
9217 Ops.erase(Ops.begin()+1);
9218 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9219 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9220 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
9221 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
9222 Ty = llvm::PointerType::get(Ops[1]->getType(), DefaultAS);
9223 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9224 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9225 }
9226 case NEON::BI__builtin_neon_vld3_lane_v:
9227 case NEON::BI__builtin_neon_vld3q_lane_v: {
9228 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
9229 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
9230 Ops.push_back(Ops[1]);
9231 Ops.erase(Ops.begin()+1);
9232 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9233 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9234 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
9235 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
9236 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
9237 Ty = llvm::PointerType::get(Ops[1]->getType(), DefaultAS);
9238 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9239 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9240 }
9241 case NEON::BI__builtin_neon_vld4_lane_v:
9242 case NEON::BI__builtin_neon_vld4q_lane_v: {
9243 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
9244 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
9245 Ops.push_back(Ops[1]);
9246 Ops.erase(Ops.begin()+1);
9247 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9248 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9249 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
9250 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
9251 Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
9252 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
9253 Ty = llvm::PointerType::get(Ops[1]->getType(), DefaultAS);
9254 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9255 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
9256 }
9257 case NEON::BI__builtin_neon_vst2_v:
9258 case NEON::BI__builtin_neon_vst2q_v: {
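    // Move the address operand (Ops[0]) to the end: the st2/st3/st4
    // intrinsics take the data vectors first and the pointer last.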
9259 Ops.push_back(Ops[0]);
9260 Ops.erase(Ops.begin());
9261 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
9262 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
9263 Ops, "");
9264 }
9265 case NEON::BI__builtin_neon_vst2_lane_v:
9266 case NEON::BI__builtin_neon_vst2q_lane_v: {
9267 Ops.push_back(Ops[0]);
9268 Ops.erase(Ops.begin());
9269 Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
9270 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
9271 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
9272 Ops, "");
9273 }
9274 case NEON::BI__builtin_neon_vst3_v:
9275 case NEON::BI__builtin_neon_vst3q_v: {
9276 Ops.push_back(Ops[0]);
9277 Ops.erase(Ops.begin());
9278 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
9279 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
9280 Ops, "");
9281 }
9282 case NEON::BI__builtin_neon_vst3_lane_v:
9283 case NEON::BI__builtin_neon_vst3q_lane_v: {
9284 Ops.push_back(Ops[0]);
9285 Ops.erase(Ops.begin());
9286 Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
9287 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
9288 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
9289 Ops, "");
9290 }
9291 case NEON::BI__builtin_neon_vst4_v:
9292 case NEON::BI__builtin_neon_vst4q_v: {
9293 Ops.push_back(Ops[0]);
9294 Ops.erase(Ops.begin());
9295 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
9296 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
9297 Ops, "");
9298 }
9299 case NEON::BI__builtin_neon_vst4_lane_v:
9300 case NEON::BI__builtin_neon_vst4q_lane_v: {
9301 Ops.push_back(Ops[0]);
9302 Ops.erase(Ops.begin());
9303 Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
9304 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
9305 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
9306 Ops, "");
9307 }
9308 case NEON::BI__builtin_neon_vtrn_v:
9309 case NEON::BI__builtin_neon_vtrnq_v: {
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::PointerType::get(Ty, DefaultAS));
9311 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9312 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9313 Value *SV = nullptr;
9314
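    // vtrn returns a pair of vectors; emit one shufflevector per result and
    // store each into its half of the two-vector sret buffer.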
9315 for (unsigned vi = 0; vi != 2; ++vi) {
9316 SmallVector<uint32_t, 16> Indices;
9317 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
9318 Indices.push_back(i+vi);
9319 Indices.push_back(i+e+vi);
9320 }
9321 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
9322 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
9323 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
9324 }
9325 return SV;
9326 }
9327 case NEON::BI__builtin_neon_vuzp_v:
9328 case NEON::BI__builtin_neon_vuzpq_v: {
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::PointerType::get(Ty, DefaultAS));
9330 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9331 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9332 Value *SV = nullptr;
9333
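    // vuzp de-interleaves: pass 0 gathers the even-indexed elements of the
    // concatenated inputs, pass 1 the odd-indexed ones.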
9334 for (unsigned vi = 0; vi != 2; ++vi) {
9335 SmallVector<uint32_t, 16> Indices;
9336 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
9337 Indices.push_back(2*i+vi);
9338
9339 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
9340 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
9341 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
9342 }
9343 return SV;
9344 }
9345 case NEON::BI__builtin_neon_vzip_v:
9346 case NEON::BI__builtin_neon_vzipq_v: {
    Ops[0] =
        Builder.CreateBitCast(Ops[0], llvm::PointerType::get(Ty, DefaultAS));
9348 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
9349 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
9350 Value *SV = nullptr;
9351
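    // vzip interleaves: each pass pairs corresponding elements from the low
    // (pass 0) or high (pass 1) halves of the two inputs.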
9352 for (unsigned vi = 0; vi != 2; ++vi) {
9353 SmallVector<uint32_t, 16> Indices;
9354 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
9355 Indices.push_back((i + vi*e) >> 1);
9356 Indices.push_back(((i + vi*e) >> 1)+e);
9357 }
9358 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
9359 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
9360 SV = Builder.CreateDefaultAlignedStore(SV, Addr);
9361 }
9362 return SV;
9363 }
9364 case NEON::BI__builtin_neon_vqtbl1q_v: {
9365 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
9366 Ops, "vtbl1");
9367 }
9368 case NEON::BI__builtin_neon_vqtbl2q_v: {
9369 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
9370 Ops, "vtbl2");
9371 }
9372 case NEON::BI__builtin_neon_vqtbl3q_v: {
9373 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
9374 Ops, "vtbl3");
9375 }
9376 case NEON::BI__builtin_neon_vqtbl4q_v: {
9377 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
9378 Ops, "vtbl4");
9379 }
9380 case NEON::BI__builtin_neon_vqtbx1q_v: {
9381 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
9382 Ops, "vtbx1");
9383 }
9384 case NEON::BI__builtin_neon_vqtbx2q_v: {
9385 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
9386 Ops, "vtbx2");
9387 }
9388 case NEON::BI__builtin_neon_vqtbx3q_v: {
9389 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
9390 Ops, "vtbx3");
9391 }
9392 case NEON::BI__builtin_neon_vqtbx4q_v: {
9393 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
9394 Ops, "vtbx4");
9395 }
9396 case NEON::BI__builtin_neon_vsqadd_v:
9397 case NEON::BI__builtin_neon_vsqaddq_v: {
9398 Int = Intrinsic::aarch64_neon_usqadd;
9399 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
9400 }
9401 case NEON::BI__builtin_neon_vuqadd_v:
9402 case NEON::BI__builtin_neon_vuqaddq_v: {
9403 Int = Intrinsic::aarch64_neon_suqadd;
9404 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
9405 }
9406 case AArch64::BI_BitScanForward:
9407 case AArch64::BI_BitScanForward64:
9408 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
9409 case AArch64::BI_BitScanReverse:
9410 case AArch64::BI_BitScanReverse64:
9411 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
9412 case AArch64::BI_InterlockedAnd64:
9413 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
9414 case AArch64::BI_InterlockedExchange64:
9415 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
9416 case AArch64::BI_InterlockedExchangeAdd64:
9417 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
9418 case AArch64::BI_InterlockedExchangeSub64:
9419 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
9420 case AArch64::BI_InterlockedOr64:
9421 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
9422 case AArch64::BI_InterlockedXor64:
9423 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
9424 case AArch64::BI_InterlockedDecrement64:
9425 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
9426 case AArch64::BI_InterlockedIncrement64:
9427 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
9428 case AArch64::BI_InterlockedExchangeAdd8_acq:
9429 case AArch64::BI_InterlockedExchangeAdd16_acq:
9430 case AArch64::BI_InterlockedExchangeAdd_acq:
9431 case AArch64::BI_InterlockedExchangeAdd64_acq:
9432 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_acq, E);
9433 case AArch64::BI_InterlockedExchangeAdd8_rel:
9434 case AArch64::BI_InterlockedExchangeAdd16_rel:
9435 case AArch64::BI_InterlockedExchangeAdd_rel:
9436 case AArch64::BI_InterlockedExchangeAdd64_rel:
9437 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_rel, E);
9438 case AArch64::BI_InterlockedExchangeAdd8_nf:
9439 case AArch64::BI_InterlockedExchangeAdd16_nf:
9440 case AArch64::BI_InterlockedExchangeAdd_nf:
9441 case AArch64::BI_InterlockedExchangeAdd64_nf:
9442 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd_nf, E);
9443 case AArch64::BI_InterlockedExchange8_acq:
9444 case AArch64::BI_InterlockedExchange16_acq:
9445 case AArch64::BI_InterlockedExchange_acq:
9446 case AArch64::BI_InterlockedExchange64_acq:
9447 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_acq, E);
9448 case AArch64::BI_InterlockedExchange8_rel:
9449 case AArch64::BI_InterlockedExchange16_rel:
9450 case AArch64::BI_InterlockedExchange_rel:
9451 case AArch64::BI_InterlockedExchange64_rel:
9452 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_rel, E);
9453 case AArch64::BI_InterlockedExchange8_nf:
9454 case AArch64::BI_InterlockedExchange16_nf:
9455 case AArch64::BI_InterlockedExchange_nf:
9456 case AArch64::BI_InterlockedExchange64_nf:
9457 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange_nf, E);
9458 case AArch64::BI_InterlockedCompareExchange8_acq:
9459 case AArch64::BI_InterlockedCompareExchange16_acq:
9460 case AArch64::BI_InterlockedCompareExchange_acq:
9461 case AArch64::BI_InterlockedCompareExchange64_acq:
9462 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_acq, E);
9463 case AArch64::BI_InterlockedCompareExchange8_rel:
9464 case AArch64::BI_InterlockedCompareExchange16_rel:
9465 case AArch64::BI_InterlockedCompareExchange_rel:
9466 case AArch64::BI_InterlockedCompareExchange64_rel:
9467 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_rel, E);
9468 case AArch64::BI_InterlockedCompareExchange8_nf:
9469 case AArch64::BI_InterlockedCompareExchange16_nf:
9470 case AArch64::BI_InterlockedCompareExchange_nf:
9471 case AArch64::BI_InterlockedCompareExchange64_nf:
9472 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E);
9473 case AArch64::BI_InterlockedOr8_acq:
9474 case AArch64::BI_InterlockedOr16_acq:
9475 case AArch64::BI_InterlockedOr_acq:
9476 case AArch64::BI_InterlockedOr64_acq:
9477 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_acq, E);
9478 case AArch64::BI_InterlockedOr8_rel:
9479 case AArch64::BI_InterlockedOr16_rel:
9480 case AArch64::BI_InterlockedOr_rel:
9481 case AArch64::BI_InterlockedOr64_rel:
9482 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_rel, E);
9483 case AArch64::BI_InterlockedOr8_nf:
9484 case AArch64::BI_InterlockedOr16_nf:
9485 case AArch64::BI_InterlockedOr_nf:
9486 case AArch64::BI_InterlockedOr64_nf:
9487 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr_nf, E);
9488 case AArch64::BI_InterlockedXor8_acq:
9489 case AArch64::BI_InterlockedXor16_acq:
9490 case AArch64::BI_InterlockedXor_acq:
9491 case AArch64::BI_InterlockedXor64_acq:
9492 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_acq, E);
9493 case AArch64::BI_InterlockedXor8_rel:
9494 case AArch64::BI_InterlockedXor16_rel:
9495 case AArch64::BI_InterlockedXor_rel:
9496 case AArch64::BI_InterlockedXor64_rel:
9497 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_rel, E);
9498 case AArch64::BI_InterlockedXor8_nf:
9499 case AArch64::BI_InterlockedXor16_nf:
9500 case AArch64::BI_InterlockedXor_nf:
9501 case AArch64::BI_InterlockedXor64_nf:
9502 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor_nf, E);
9503 case AArch64::BI_InterlockedAnd8_acq:
9504 case AArch64::BI_InterlockedAnd16_acq:
9505 case AArch64::BI_InterlockedAnd_acq:
9506 case AArch64::BI_InterlockedAnd64_acq:
9507 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_acq, E);
9508 case AArch64::BI_InterlockedAnd8_rel:
9509 case AArch64::BI_InterlockedAnd16_rel:
9510 case AArch64::BI_InterlockedAnd_rel:
9511 case AArch64::BI_InterlockedAnd64_rel:
9512 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_rel, E);
9513 case AArch64::BI_InterlockedAnd8_nf:
9514 case AArch64::BI_InterlockedAnd16_nf:
9515 case AArch64::BI_InterlockedAnd_nf:
9516 case AArch64::BI_InterlockedAnd64_nf:
9517 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd_nf, E);
9518 case AArch64::BI_InterlockedIncrement16_acq:
9519 case AArch64::BI_InterlockedIncrement_acq:
9520 case AArch64::BI_InterlockedIncrement64_acq:
9521 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_acq, E);
9522 case AArch64::BI_InterlockedIncrement16_rel:
9523 case AArch64::BI_InterlockedIncrement_rel:
9524 case AArch64::BI_InterlockedIncrement64_rel:
9525 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_rel, E);
9526 case AArch64::BI_InterlockedIncrement16_nf:
9527 case AArch64::BI_InterlockedIncrement_nf:
9528 case AArch64::BI_InterlockedIncrement64_nf:
9529 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement_nf, E);
9530 case AArch64::BI_InterlockedDecrement16_acq:
9531 case AArch64::BI_InterlockedDecrement_acq:
9532 case AArch64::BI_InterlockedDecrement64_acq:
9533 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_acq, E);
9534 case AArch64::BI_InterlockedDecrement16_rel:
9535 case AArch64::BI_InterlockedDecrement_rel:
9536 case AArch64::BI_InterlockedDecrement64_rel:
9537 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_rel, E);
9538 case AArch64::BI_InterlockedDecrement16_nf:
9539 case AArch64::BI_InterlockedDecrement_nf:
9540 case AArch64::BI_InterlockedDecrement64_nf:
9541 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement_nf, E);
9542
9543 case AArch64::BI_InterlockedAdd: {
9544 Value *Arg0 = EmitScalarExpr(E->getArg(0));
9545 Value *Arg1 = EmitScalarExpr(E->getArg(1));
9546 AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
9547 AtomicRMWInst::Add, Arg0, Arg1,
9548 llvm::AtomicOrdering::SequentiallyConsistent);
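    // atomicrmw add yields the value held in memory before the operation, but
    // _InterlockedAdd returns the post-add value, so add the operand back in.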
9549 return Builder.CreateAdd(RMWI, Arg1);
9550 }
9551 }
9552}
9553
llvm::Value *CodeGenFunction::BuildVector(ArrayRef<llvm::Value *> Ops) {
9556 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
9557 "Not a power-of-two sized vector!");
9558 bool AllConstants = true;
9559 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
9560 AllConstants &= isa<Constant>(Ops[i]);
9561
9562 // If this is a constant vector, create a ConstantVector.
9563 if (AllConstants) {
9564 SmallVector<llvm::Constant*, 16> CstOps;
9565 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
9566 CstOps.push_back(cast<Constant>(Ops[i]));
9567 return llvm::ConstantVector::get(CstOps);
9568 }
9569
9570 // Otherwise, insertelement the values to build the vector.
  Value *Result = llvm::UndefValue::get(
      llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
9573
9574 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
9575 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
9576
9577 return Result;
9578}
9579
9580// Convert the mask from an integer type to a vector of i1.
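// For example (illustrative values only): an i8 mask of 0b00001011 with
// NumElts == 4 becomes <4 x i1> <1, 1, 0, 1> -- bit i of the integer maps to
// lane i of the vector.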
9581static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
9582 unsigned NumElts) {
9583
9584 llvm::VectorType *MaskTy = llvm::VectorType::get(CGF.Builder.getInt1Ty(),
9585 cast<IntegerType>(Mask->getType())->getBitWidth());
9586 Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
9587
  // If we have fewer than 8 elements, the starting mask was an i8 and we
  // need to extract down to the right number of elements.
9590 if (NumElts < 8) {
9591 uint32_t Indices[4];
9592 for (unsigned i = 0; i != NumElts; ++i)
9593 Indices[i] = i;
9594 MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
9595 makeArrayRef(Indices, NumElts),
9596 "extract");
9597 }
9598 return MaskVec;
9599}
9600
9601static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
9602 ArrayRef<Value *> Ops,
9603 unsigned Align) {
  // Cast the pointer to the right type.
  Value *Ptr = CGF.Builder.CreateBitCast(
      Ops[0], CGF.CGM.getPointerInDefaultAS(Ops[1]->getType()));
9607
9608 Value *MaskVec = getMaskVecValue(CGF, Ops[2],
9609 Ops[1]->getType()->getVectorNumElements());
9610
9611 return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
9612}
9613
9614static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
9615 ArrayRef<Value *> Ops, unsigned Align) {
  // Cast the pointer to the right type.
9617 Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
9618 CGF.CGM.getPointerInDefaultAS(Ops[1]->getType()));
9619
9620 Value *MaskVec = getMaskVecValue(CGF, Ops[2],
9621 Ops[1]->getType()->getVectorNumElements());
9622
9623 return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
9624}
9625
9626static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
9627 ArrayRef<Value *> Ops) {
9628 llvm::Type *ResultTy = Ops[1]->getType();
9629 llvm::Type *PtrTy = ResultTy->getVectorElementType();
9630
  // Cast the pointer to the element type.
9632 Value *Ptr =
9633 CGF.Builder.CreateBitCast(Ops[0], CGF.CGM.getPointerInDefaultAS(PtrTy));
9634
9635 Value *MaskVec = getMaskVecValue(CGF, Ops[2],
9636 ResultTy->getVectorNumElements());
9637
9638 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
9639 ResultTy);
9640 return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
9641}
9642
9643static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
9644 ArrayRef<Value *> Ops,
9645 bool IsCompress) {
9646 llvm::Type *ResultTy = Ops[1]->getType();
9647
9648 Value *MaskVec = getMaskVecValue(CGF, Ops[2],
9649 ResultTy->getVectorNumElements());
9650
9651 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
9652 : Intrinsic::x86_avx512_mask_expand;
9653 llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
9654 return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
9655}
9656
9657static Value *EmitX86CompressStore(CodeGenFunction &CGF,
9658 ArrayRef<Value *> Ops) {
9659 llvm::Type *ResultTy = Ops[1]->getType();
9660 llvm::Type *PtrTy = ResultTy->getVectorElementType();
9661
  // Cast the pointer to the element type.
9663 Value *Ptr =
9664 CGF.Builder.CreateBitCast(Ops[0], CGF.CGM.getPointerInDefaultAS(PtrTy));
9665
9666 Value *MaskVec = getMaskVecValue(CGF, Ops[2],
9667 ResultTy->getVectorNumElements());
9668
9669 llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
9670 ResultTy);
9671 return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
9672}
9673
9674static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
9675 ArrayRef<Value *> Ops,
9676 bool InvertLHS = false) {
9677 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
9678 Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
9679 Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
9680
9681 if (InvertLHS)
9682 LHS = CGF.Builder.CreateNot(LHS);
9683
9684 return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
9685 Ops[0]->getType());
9686}
9687
9688static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
9689 Value *Amt, bool IsRight) {
9690 llvm::Type *Ty = Op0->getType();
9691
  // The shift amount may be a scalar immediate, in which case create a splat
  // vector. Funnel shift amounts are treated modulo the element bit width,
  // and the widths are all powers of two, so we only care about the lowest
  // log2(BitWidth) bits anyway.
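  // As an illustrative example for i8 elements: fshl(a, b, 3) computes
  // (a << 3) | (b >> 5), and fshr(a, b, 3) computes (a << 5) | (b >> 3).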
9695 if (Amt->getType() != Ty) {
9696 unsigned NumElts = Ty->getVectorNumElements();
9697 Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
9698 Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
9699 }
9700
9701 unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
9702 Function *F = CGF.CGM.getIntrinsic(IID, Ty);
9703 return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
9704}
9705
9706static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
9707 bool IsSigned) {
9708 Value *Op0 = Ops[0];
9709 Value *Op1 = Ops[1];
9710 llvm::Type *Ty = Op0->getType();
9711 uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
9712
9713 CmpInst::Predicate Pred;
9714 switch (Imm) {
9715 case 0x0:
9716 Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
9717 break;
9718 case 0x1:
9719 Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
9720 break;
9721 case 0x2:
9722 Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
9723 break;
9724 case 0x3:
9725 Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
9726 break;
9727 case 0x4:
9728 Pred = ICmpInst::ICMP_EQ;
9729 break;
9730 case 0x5:
9731 Pred = ICmpInst::ICMP_NE;
9732 break;
9733 case 0x6:
9734 return llvm::Constant::getNullValue(Ty); // FALSE
9735 case 0x7:
9736 return llvm::Constant::getAllOnesValue(Ty); // TRUE
9737 default:
9738 llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
9739 }
9740
9741 Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
9742 Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
9743 return Res;
9744}
9745
9746static Value *EmitX86Select(CodeGenFunction &CGF,
9747 Value *Mask, Value *Op0, Value *Op1) {
9748
  // If the mask is all ones, just return the first argument.
9750 if (const auto *C = dyn_cast<Constant>(Mask))
9751 if (C->isAllOnesValue())
9752 return Op0;
9753
9754 Mask = getMaskVecValue(CGF, Mask, Op0->getType()->getVectorNumElements());
9755
9756 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
9757}
9758
9759static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
9760 Value *Mask, Value *Op0, Value *Op1) {
  // If the mask is all ones, just return the first argument.
9762 if (const auto *C = dyn_cast<Constant>(Mask))
9763 if (C->isAllOnesValue())
9764 return Op0;
9765
9766 llvm::VectorType *MaskTy =
9767 llvm::VectorType::get(CGF.Builder.getInt1Ty(),
9768 Mask->getType()->getIntegerBitWidth());
9769 Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
9770 Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
9771 return CGF.Builder.CreateSelect(Mask, Op0, Op1);
9772}
9773
9774static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
9775 unsigned NumElts, Value *MaskIn) {
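  // The compare produced a <NumElts x i1>; AND in any incoming mask, then
  // widen to at least 8 lanes (padding with zeros from the null vector) so
  // the result can be bitcast to an integer mask of at least i8.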
9776 if (MaskIn) {
9777 const auto *C = dyn_cast<Constant>(MaskIn);
9778 if (!C || !C->isAllOnesValue())
9779 Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
9780 }
9781
9782 if (NumElts < 8) {
9783 uint32_t Indices[8];
9784 for (unsigned i = 0; i != NumElts; ++i)
9785 Indices[i] = i;
9786 for (unsigned i = NumElts; i != 8; ++i)
9787 Indices[i] = i % NumElts + NumElts;
9788 Cmp = CGF.Builder.CreateShuffleVector(
9789 Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
9790 }
9791
9792 return CGF.Builder.CreateBitCast(Cmp,
9793 IntegerType::get(CGF.getLLVMContext(),
9794 std::max(NumElts, 8U)));
9795}
9796
9797static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
9798 bool Signed, ArrayRef<Value *> Ops) {
9799 assert((Ops.size() == 2 || Ops.size() == 4) &&
9800 "Unexpected number of arguments");
9801 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
9802 Value *Cmp;
9803
9804 if (CC == 3) {
9805 Cmp = Constant::getNullValue(
9806 llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
9807 } else if (CC == 7) {
9808 Cmp = Constant::getAllOnesValue(
9809 llvm::VectorType::get(CGF.Builder.getInt1Ty(), NumElts));
9810 } else {
9811 ICmpInst::Predicate Pred;
9812 switch (CC) {
9813 default: llvm_unreachable("Unknown condition code");
9814 case 0: Pred = ICmpInst::ICMP_EQ; break;
9815 case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
9816 case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
9817 case 4: Pred = ICmpInst::ICMP_NE; break;
9818 case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
9819 case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
9820 }
9821 Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
9822 }
9823
9824 Value *MaskIn = nullptr;
9825 if (Ops.size() == 4)
9826 MaskIn = Ops[3];
9827
9828 return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
9829}
9830
9831static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
9832 Value *Zero = Constant::getNullValue(In->getType());
9833 return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
9834}
9835
9836static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
9837 ArrayRef<Value *> Ops, bool IsSigned) {
9838 unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
9839 llvm::Type *Ty = Ops[1]->getType();
9840
9841 Value *Res;
9842 if (Rnd != 4) {
9843 Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
9844 : Intrinsic::x86_avx512_uitofp_round;
9845 Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
9846 Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
9847 } else {
9848 Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
9849 : CGF.Builder.CreateUIToFP(Ops[0], Ty);
9850 }
9851
9852 return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
9853}
9854
9855static Value *EmitX86Abs(CodeGenFunction &CGF, ArrayRef<Value *> Ops) {
9856
9857 llvm::Type *Ty = Ops[0]->getType();
9858 Value *Zero = llvm::Constant::getNullValue(Ty);
9859 Value *Sub = CGF.Builder.CreateSub(Zero, Ops[0]);
9860 Value *Cmp = CGF.Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Zero);
9861 Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Sub);
9862 return Res;
9863}
9864
static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
                            ArrayRef<Value *> Ops) {
  assert(Ops.size() == 2 && "Unexpected number of operands");
  Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
  Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
  return Res;
}
9873
9874// Lowers X86 FMA intrinsics to IR.
9875static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
9876 unsigned BuiltinID, bool IsAddSub) {
9877
9878 bool Subtract = false;
9879 Intrinsic::ID IID = Intrinsic::not_intrinsic;
9880 switch (BuiltinID) {
9881 default: break;
9882 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
9883 Subtract = true;
9884 LLVM_FALLTHROUGH;
9885 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
9886 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
9887 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
9888 IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
9889 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
9890 Subtract = true;
9891 LLVM_FALLTHROUGH;
9892 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
9893 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
9894 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
9895 IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
9896 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
9897 Subtract = true;
9898 LLVM_FALLTHROUGH;
9899 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
9900 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
9901 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
9902 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
9903 break;
9904 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
9905 Subtract = true;
9906 LLVM_FALLTHROUGH;
9907 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
9908 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
9909 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
9910 IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
9911 break;
9912 }
9913
9914 Value *A = Ops[0];
9915 Value *B = Ops[1];
9916 Value *C = Ops[2];
9917
9918 if (Subtract)
9919 C = CGF.Builder.CreateFNeg(C);
9920
9921 Value *Res;
9922
  // Use the rounding intrinsic only when a mode other than
  // _MM_FROUND_CUR_DIRECTION (4, i.e. no explicit rounding) is requested;
  // otherwise lower to a plain llvm.fma.
9924 if (IID != Intrinsic::not_intrinsic &&
9925 cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4) {
9926 Function *Intr = CGF.CGM.getIntrinsic(IID);
9927 Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
9928 } else {
9929 llvm::Type *Ty = A->getType();
9930 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
9931 Res = CGF.Builder.CreateCall(FMA, {A, B, C} );
9932
9933 if (IsAddSub) {
      // For add/sub: even lanes take the multiply-subtract result (computed
      // below with a negated C) and odd lanes the multiply-add result; blend
      // the two with a shuffle.
9935 unsigned NumElts = Ty->getVectorNumElements();
9936 SmallVector<uint32_t, 16> Indices(NumElts);
9937 for (unsigned i = 0; i != NumElts; ++i)
9938 Indices[i] = i + (i % 2) * NumElts;
9939
9940 Value *NegC = CGF.Builder.CreateFNeg(C);
9941 Value *FMSub = CGF.Builder.CreateCall(FMA, {A, B, NegC} );
9942 Res = CGF.Builder.CreateShuffleVector(FMSub, Res, Indices);
9943 }
9944 }
9945
9946 // Handle any required masking.
9947 Value *MaskFalseVal = nullptr;
9948 switch (BuiltinID) {
9949 case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
9950 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
9951 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
9952 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
9953 MaskFalseVal = Ops[0];
9954 break;
9955 case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
9956 case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
9957 case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
9958 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
9959 MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
9960 break;
9961 case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
9962 case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
9963 case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
9964 case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
9965 case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
9966 case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
9967 case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
9968 case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
9969 MaskFalseVal = Ops[2];
9970 break;
9971 }
9972
9973 if (MaskFalseVal)
9974 return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
9975
9976 return Res;
9977}
9978
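// Lower the scalar (SS/SD) FMA builtins: operate on element 0 of the 128-bit
// vector operands, optionally apply masking, and reinsert the result into
// lane 0 of Upper.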
9979static Value *
9980EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
9981 Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
9982 bool NegAcc = false) {
9983 unsigned Rnd = 4;
9984 if (Ops.size() > 4)
9985 Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
9986
9987 if (NegAcc)
9988 Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
9989
9990 Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
9991 Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
9992 Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
9993 Value *Res;
9994 if (Rnd != 4) {
9995 Intrinsic::ID IID = Ops[0]->getType()->getPrimitiveSizeInBits() == 32 ?
9996 Intrinsic::x86_avx512_vfmadd_f32 :
9997 Intrinsic::x86_avx512_vfmadd_f64;
9998 Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
9999 {Ops[0], Ops[1], Ops[2], Ops[4]});
10000 } else {
10001 Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
10002 Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
10003 }
10004 // If we have more than 3 arguments, we need to do masking.
10005 if (Ops.size() > 3) {
10006 Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
10007 : Ops[PTIdx];
10008
    // If we negated the accumulator and it is the PassThru value, we need to
    // bypass the negate. Conveniently, Upper should be the same thing in this
    // case.
10012 if (NegAcc && PTIdx == 2)
10013 PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
10014
10015 Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
10016 }
10017 return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
10018}
10019
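// Emit a pmuldq/pmuludq-style multiply: each 64-bit result lane is the
// product of the low 32 bits of the corresponding 64-bit lanes of the two
// sources, sign- or zero-extended to 64 bits.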
10020static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
10021 ArrayRef<Value *> Ops) {
10022 llvm::Type *Ty = Ops[0]->getType();
10023 // Arguments have a vXi32 type so cast to vXi64.
10024 Ty = llvm::VectorType::get(CGF.Int64Ty,
10025 Ty->getPrimitiveSizeInBits() / 64);
10026 Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
10027 Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
10028
10029 if (IsSigned) {
10030 // Shift left then arithmetic shift right.
10031 Constant *ShiftAmt = ConstantInt::get(Ty, 32);
10032 LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
10033 LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
10034 RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
10035 RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
10036 } else {
10037 // Clear the upper bits.
10038 Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
10039 LHS = CGF.Builder.CreateAnd(LHS, Mask);
10040 RHS = CGF.Builder.CreateAnd(RHS, Mask);
10041 }
10042
10043 return CGF.Builder.CreateMul(LHS, RHS);
10044}
10045
// Emit a masked pternlog intrinsic. This only exists because the header has
// to use a macro, and we cannot pass the input argument to both a pternlog
// builtin and a select builtin without evaluating it twice.
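// The immediate (Ops[3]) is an 8-entry truth table over the three inputs: at
// each bit position the three corresponding input bits form an index into
// the immediate, and the immediate's bit at that index is the result bit.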
10049static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
10050 ArrayRef<Value *> Ops) {
10051 llvm::Type *Ty = Ops[0]->getType();
10052
10053 unsigned VecWidth = Ty->getPrimitiveSizeInBits();
10054 unsigned EltWidth = Ty->getScalarSizeInBits();
10055 Intrinsic::ID IID;
10056 if (VecWidth == 128 && EltWidth == 32)
10057 IID = Intrinsic::x86_avx512_pternlog_d_128;
10058 else if (VecWidth == 256 && EltWidth == 32)
10059 IID = Intrinsic::x86_avx512_pternlog_d_256;
10060 else if (VecWidth == 512 && EltWidth == 32)
10061 IID = Intrinsic::x86_avx512_pternlog_d_512;
10062 else if (VecWidth == 128 && EltWidth == 64)
10063 IID = Intrinsic::x86_avx512_pternlog_q_128;
10064 else if (VecWidth == 256 && EltWidth == 64)
10065 IID = Intrinsic::x86_avx512_pternlog_q_256;
10066 else if (VecWidth == 512 && EltWidth == 64)
10067 IID = Intrinsic::x86_avx512_pternlog_q_512;
10068 else
10069 llvm_unreachable("Unexpected intrinsic");
10070
10071 Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
10072 Ops.drop_back());
10073 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
10074 return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
10075}
10076
10077static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
10078 llvm::Type *DstTy) {
10079 unsigned NumberOfElements = DstTy->getVectorNumElements();
10080 Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
10081 return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
10082}
10083
10084// Emit addition or subtraction with signed/unsigned saturation.
10085static Value *EmitX86AddSubSatExpr(CodeGenFunction &CGF,
10086 ArrayRef<Value *> Ops, bool IsSigned,
10087 bool IsAddition) {
10088 Intrinsic::ID IID =
10089 IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
10090 : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
10091 llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
10092 return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
10093}
10094
10095Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
10096 const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
10097 StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
10098 return EmitX86CpuIs(CPUStr);
10099}
10100
10101// Convert a BF16 to a float.
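// A bfloat16 is the high 16 bits of an IEEE-754 binary32, so widening is a
// zero-extend to i32, a 16-bit left shift, and a bitcast to float.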
10102static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
10103 const CallExpr *E,
10104 ArrayRef<Value *> Ops) {
10105 llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
10106 Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
10107 Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
10108 llvm::Type *ResultType = CGF.ConvertType(E->getType());
10109 Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
10110 return BitCast;
10111}
10112
10113Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
10114
10115 llvm::Type *Int32Ty = Builder.getInt32Ty();
10116
10117 // Matching the struct layout from the compiler-rt/libgcc structure that is
10118 // filled in:
10119 // unsigned int __cpu_vendor;
10120 // unsigned int __cpu_type;
10121 // unsigned int __cpu_subtype;
10122 // unsigned int __cpu_features[1];
10123 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
10124 llvm::ArrayType::get(Int32Ty, 1));
10125
10126 // Grab the global __cpu_model.
10127 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
10128 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
10129
  // Compute which field of __cpu_model to load (vendor, type, or subtype)
  // and the value to compare it against.
10132 unsigned Index;
10133 unsigned Value;
10134 std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
10135#define X86_VENDOR(ENUM, STRING) \
10136 .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
10137#define X86_CPU_TYPE_COMPAT_WITH_ALIAS(ARCHNAME, ENUM, STR, ALIAS) \
10138 .Cases(STR, ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
10139#define X86_CPU_TYPE_COMPAT(ARCHNAME, ENUM, STR) \
10140 .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
10141#define X86_CPU_SUBTYPE_COMPAT(ARCHNAME, ENUM, STR) \
10142 .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
10143#include "llvm/Support/X86TargetParser.def"
10144 .Default({0, 0});
10145 assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
10146
10147 // Grab the appropriate field from __cpu_model.
10148 llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
10149 ConstantInt::get(Int32Ty, Index)};
10150 llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
10151 CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
10152
10153 // Check the value of the field against the requested value.
10154 return Builder.CreateICmpEQ(CpuValue,
10155 llvm::ConstantInt::get(Int32Ty, Value));
10156}
10157
10158Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
10159 const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
10160 StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
10161 return EmitX86CpuSupports(FeatureStr);
10162}
10163
10164uint64_t
10165CodeGenFunction::GetX86CpuSupportsMask(ArrayRef<StringRef> FeatureStrs) {
10166 // Processor features and mapping to processor feature value.
10167 uint64_t FeaturesMask = 0;
10168 for (const StringRef &FeatureStr : FeatureStrs) {
10169 unsigned Feature =
10170 StringSwitch<unsigned>(FeatureStr)
10171#define X86_FEATURE_COMPAT(VAL, ENUM, STR) .Case(STR, VAL)
10172#include "llvm/Support/X86TargetParser.def"
10173 ;
10174 FeaturesMask |= (1ULL << Feature);
10175 }
10176 return FeaturesMask;
10177}
10178
10179Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
10180 return EmitX86CpuSupports(GetX86CpuSupportsMask(FeatureStrs));
10181}
10182
10183llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
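  // The low 32 feature bits live in __cpu_model.__cpu_features[0]; any high
  // bits live in the separate __cpu_features2 global.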
10184 uint32_t Features1 = Lo_32(FeaturesMask);
10185 uint32_t Features2 = Hi_32(FeaturesMask);
10186
10187 Value *Result = Builder.getTrue();
10188
10189 if (Features1 != 0) {
10190 // Matching the struct layout from the compiler-rt/libgcc structure that is
10191 // filled in:
10192 // unsigned int __cpu_vendor;
10193 // unsigned int __cpu_type;
10194 // unsigned int __cpu_subtype;
10195 // unsigned int __cpu_features[1];
10196 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
10197 llvm::ArrayType::get(Int32Ty, 1));
10198
10199 // Grab the global __cpu_model.
10200 llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
10201 cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
10202
    // Grab the first (0th) element of the __cpu_features field of the
    // __cpu_model global, whose layout is described by STy.
10205 Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
10206 Builder.getInt32(0)};
10207 Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
10208 Value *Features =
10209 Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
10210
10211 // Check the value of the bit corresponding to the feature requested.
10212 Value *Mask = Builder.getInt32(Features1);
10213 Value *Bitset = Builder.CreateAnd(Features, Mask);
10214 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
10215 Result = Builder.CreateAnd(Result, Cmp);
10216 }
10217
10218 if (Features2 != 0) {
10219 llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
10220 "__cpu_features2");
10221 cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
10222
10223 Value *Features =
10224 Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
10225
10226 // Check the value of the bit corresponding to the feature requested.
10227 Value *Mask = Builder.getInt32(Features2);
10228 Value *Bitset = Builder.CreateAnd(Features, Mask);
10229 Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
10230 Result = Builder.CreateAnd(Result, Cmp);
10231 }
10232
10233 return Result;
10234}
10235
10236Value *CodeGenFunction::EmitX86CpuInit() {
10237 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
10238 /*Variadic*/ false);
10239 llvm::FunctionCallee Func =
10240 CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
10241 cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
10242 cast<llvm::GlobalValue>(Func.getCallee())
10243 ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
10244 return Builder.CreateCall(Func);
10245}
10246
10247Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
10248 const CallExpr *E) {
10249 if (BuiltinID == X86::BI__builtin_cpu_is)
10250 return EmitX86CpuIs(E);
10251 if (BuiltinID == X86::BI__builtin_cpu_supports)
10252 return EmitX86CpuSupports(E);
10253 if (BuiltinID == X86::BI__builtin_cpu_init)
10254 return EmitX86CpuInit();
10255
10256 SmallVector<Value*, 4> Ops;
10257
10258 // Find out if any arguments are required to be integer constant expressions.
10259 unsigned ICEArguments = 0;
10260 ASTContext::GetBuiltinTypeError Error;
10261 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
10262 assert(Error == ASTContext::GE_None && "Should not codegen an error");
10263
10264 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
10265 // If this is a normal argument, just emit it as a scalar.
10266 if ((ICEArguments & (1 << i)) == 0) {
10267 Ops.push_back(EmitScalarExpr(E->getArg(i)));
10268 continue;
10269 }
10270
10271 // If this is required to be a constant, constant fold it so that we know
10272 // that the generated intrinsic gets a ConstantInt.
10273 llvm::APSInt Result;
10274 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
10275 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
10276 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
10277 }
10278
  // These exist so that the builtin that takes an immediate can be bounds
  // checked by clang to avoid passing bad immediates to the backend. Since
  // AVX has a larger immediate than SSE we would need separate builtins to
  // do the different bounds checking. Rather than create a clang-specific
  // SSE-only builtin, this implements eight separate builtins to match the
  // gcc implementation.
10285 auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
10286 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
10287 llvm::Function *F = CGM.getIntrinsic(ID);
10288 return Builder.CreateCall(F, Ops);
10289 };
10290
10291 // For the vector forms of FP comparisons, translate the builtins directly to
10292 // IR.
10293 // TODO: The builtins could be removed if the SSE header files used vector
10294 // extension comparisons directly (vector ordered/unordered may need
10295 // additional support via __builtin_isnan()).
10296 auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred) {
10297 Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
10298 llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
10299 llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
10300 Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
10301 return Builder.CreateBitCast(Sext, FPVecTy);
10302 };
10303
10304 switch (BuiltinID) {
10305 default: return nullptr;
10306 case X86::BI_mm_prefetch: {
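    // Decode the _MM_HINT_* immediate: bit 2 selects a read (0) or write (1)
    // prefetch, and the low two bits give the temporal-locality hint.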
10307 Value *Address = Ops[0];
10308 ConstantInt *C = cast<ConstantInt>(Ops[1]);
10309 Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
10310 Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
10311 Value *Data = ConstantInt::get(Int32Ty, 1);
10312 Function *F = CGM.getIntrinsic(Intrinsic::prefetch);
10313 return Builder.CreateCall(F, {Address, RW, Locality, Data});
10314 }
10315 case X86::BI_mm_clflush: {
10316 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
10317 Ops[0]);
10318 }
10319 case X86::BI_mm_lfence: {
10320 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
10321 }
10322 case X86::BI_mm_mfence: {
10323 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
10324 }
10325 case X86::BI_mm_sfence: {
10326 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
10327 }
10328 case X86::BI_mm_pause: {
10329 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
10330 }
10331 case X86::BI__rdtsc: {
10332 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
10333 }
10334 case X86::BI__builtin_ia32_rdtscp: {
10335 Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
10336 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
10337 Ops[0]);
10338 return Builder.CreateExtractValue(Call, 0);
10339 }
10340 case X86::BI__builtin_ia32_lzcnt_u16:
10341 case X86::BI__builtin_ia32_lzcnt_u32:
10342 case X86::BI__builtin_ia32_lzcnt_u64: {
10343 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
10344 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
10345 }
10346 case X86::BI__builtin_ia32_tzcnt_u16:
10347 case X86::BI__builtin_ia32_tzcnt_u32:
10348 case X86::BI__builtin_ia32_tzcnt_u64: {
10349 Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
10350 return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
10351 }
10352 case X86::BI__builtin_ia32_undef128:
10353 case X86::BI__builtin_ia32_undef256:
10354 case X86::BI__builtin_ia32_undef512:
10355 // The x86 definition of "undef" is not the same as the LLVM definition
10356 // (PR32176). We leave optimizing away an unnecessary zero constant to the
10357 // IR optimizer and backend.
10358 // TODO: If we had a "freeze" IR instruction to generate a fixed undef
10359 // value, we should use that here instead of a zero.
10360 return llvm::Constant::getNullValue(ConvertType(E->getType()));
10361 case X86::BI__builtin_ia32_vec_init_v8qi:
10362 case X86::BI__builtin_ia32_vec_init_v4hi:
10363 case X86::BI__builtin_ia32_vec_init_v2si:
10364 return Builder.CreateBitCast(BuildVector(Ops),
10365 llvm::Type::getX86_MMXTy(getLLVMContext()));
10366 case X86::BI__builtin_ia32_vec_ext_v2si:
10367 case X86::BI__builtin_ia32_vec_ext_v16qi:
10368 case X86::BI__builtin_ia32_vec_ext_v8hi:
10369 case X86::BI__builtin_ia32_vec_ext_v4si:
10370 case X86::BI__builtin_ia32_vec_ext_v4sf:
10371 case X86::BI__builtin_ia32_vec_ext_v2di:
10372 case X86::BI__builtin_ia32_vec_ext_v32qi:
10373 case X86::BI__builtin_ia32_vec_ext_v16hi:
10374 case X86::BI__builtin_ia32_vec_ext_v8si:
10375 case X86::BI__builtin_ia32_vec_ext_v4di: {
10376 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
10377 uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
10378 Index &= NumElts - 1;
10379 // These builtins exist so we can ensure the index is an ICE and in range.
10380 // Otherwise we could just do this in the header file.
10381 return Builder.CreateExtractElement(Ops[0], Index);
10382 }
10383 case X86::BI__builtin_ia32_vec_set_v16qi:
10384 case X86::BI__builtin_ia32_vec_set_v8hi:
10385 case X86::BI__builtin_ia32_vec_set_v4si:
10386 case X86::BI__builtin_ia32_vec_set_v2di:
10387 case X86::BI__builtin_ia32_vec_set_v32qi:
10388 case X86::BI__builtin_ia32_vec_set_v16hi:
10389 case X86::BI__builtin_ia32_vec_set_v8si:
10390 case X86::BI__builtin_ia32_vec_set_v4di: {
10391 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
10392 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
10393 Index &= NumElts - 1;
10394 // These builtins exist so we can ensure the index is an ICE and in range.
10395 // Otherwise we could just do this in the header file.
10396 return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
10397 }
10398 case X86::BI_mm_setcsr:
10399 case X86::BI__builtin_ia32_ldmxcsr: {
10400 Address Tmp = CreateMemTemp(E->getArg(0)->getType());
10401 Builder.CreateStore(Ops[0], Tmp);
10402 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
10403 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
10404 }
10405 case X86::BI_mm_getcsr:
10406 case X86::BI__builtin_ia32_stmxcsr: {
10407 Address Tmp = CreateMemTemp(E->getType());
10408 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
10409 Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
10410 return Builder.CreateLoad(Tmp, "stmxcsr");
10411 }
10412 case X86::BI__builtin_ia32_xsave:
10413 case X86::BI__builtin_ia32_xsave64:
10414 case X86::BI__builtin_ia32_xrstor:
10415 case X86::BI__builtin_ia32_xrstor64:
10416 case X86::BI__builtin_ia32_xsaveopt:
10417 case X86::BI__builtin_ia32_xsaveopt64:
10418 case X86::BI__builtin_ia32_xrstors:
10419 case X86::BI__builtin_ia32_xrstors64:
10420 case X86::BI__builtin_ia32_xsavec:
10421 case X86::BI__builtin_ia32_xsavec64:
10422 case X86::BI__builtin_ia32_xsaves:
10423 case X86::BI__builtin_ia32_xsaves64:
10424 case X86::BI__builtin_ia32_xsetbv:
10425 case X86::BI_xsetbv: {
10426 Intrinsic::ID ID;
10427#define INTRINSIC_X86_XSAVE_ID(NAME) \
10428 case X86::BI__builtin_ia32_##NAME: \
10429 ID = Intrinsic::x86_##NAME; \
10430 break
10431 switch (BuiltinID) {
10432 default: llvm_unreachable("Unsupported intrinsic!");
10433 INTRINSIC_X86_XSAVE_ID(xsave);
10434 INTRINSIC_X86_XSAVE_ID(xsave64);
10435 INTRINSIC_X86_XSAVE_ID(xrstor);
10436 INTRINSIC_X86_XSAVE_ID(xrstor64);
10437 INTRINSIC_X86_XSAVE_ID(xsaveopt);
10438 INTRINSIC_X86_XSAVE_ID(xsaveopt64);
10439 INTRINSIC_X86_XSAVE_ID(xrstors);
10440 INTRINSIC_X86_XSAVE_ID(xrstors64);
10441 INTRINSIC_X86_XSAVE_ID(xsavec);
10442 INTRINSIC_X86_XSAVE_ID(xsavec64);
10443 INTRINSIC_X86_XSAVE_ID(xsaves);
10444 INTRINSIC_X86_XSAVE_ID(xsaves64);
10445 INTRINSIC_X86_XSAVE_ID(xsetbv);
10446 case X86::BI_xsetbv:
10447 ID = Intrinsic::x86_xsetbv;
10448 break;
10449 }
10450#undef INTRINSIC_X86_XSAVE_ID
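    // The xsave-family intrinsics take the 64-bit feature mask split into
    // 32-bit halves (EDX:EAX), so break Ops[1] into high and low parts.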
10451 Value *Mhi = Builder.CreateTrunc(
10452 Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
10453 Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
10454 Ops[1] = Mhi;
10455 Ops.push_back(Mlo);
10456 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
10457 }
10458 case X86::BI__builtin_ia32_xgetbv:
10459 case X86::BI_xgetbv:
10460 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
10461 case X86::BI__builtin_ia32_storedqudi128_mask:
10462 case X86::BI__builtin_ia32_storedqusi128_mask:
10463 case X86::BI__builtin_ia32_storedquhi128_mask:
10464 case X86::BI__builtin_ia32_storedquqi128_mask:
10465 case X86::BI__builtin_ia32_storeupd128_mask:
10466 case X86::BI__builtin_ia32_storeups128_mask:
10467 case X86::BI__builtin_ia32_storedqudi256_mask:
10468 case X86::BI__builtin_ia32_storedqusi256_mask:
10469 case X86::BI__builtin_ia32_storedquhi256_mask:
10470 case X86::BI__builtin_ia32_storedquqi256_mask:
10471 case X86::BI__builtin_ia32_storeupd256_mask:
10472 case X86::BI__builtin_ia32_storeups256_mask:
10473 case X86::BI__builtin_ia32_storedqudi512_mask:
10474 case X86::BI__builtin_ia32_storedqusi512_mask:
10475 case X86::BI__builtin_ia32_storedquhi512_mask:
10476 case X86::BI__builtin_ia32_storedquqi512_mask:
10477 case X86::BI__builtin_ia32_storeupd512_mask:
10478 case X86::BI__builtin_ia32_storeups512_mask:
10479 return EmitX86MaskedStore(*this, Ops, 1);
10480
10481 case X86::BI__builtin_ia32_storess128_mask:
10482 case X86::BI__builtin_ia32_storesd128_mask: {
10483 return EmitX86MaskedStore(*this, Ops, 1);
10484 }
10485 case X86::BI__builtin_ia32_vpopcntb_128:
10486 case X86::BI__builtin_ia32_vpopcntd_128:
10487 case X86::BI__builtin_ia32_vpopcntq_128:
10488 case X86::BI__builtin_ia32_vpopcntw_128:
10489 case X86::BI__builtin_ia32_vpopcntb_256:
10490 case X86::BI__builtin_ia32_vpopcntd_256:
10491 case X86::BI__builtin_ia32_vpopcntq_256:
10492 case X86::BI__builtin_ia32_vpopcntw_256:
10493 case X86::BI__builtin_ia32_vpopcntb_512:
10494 case X86::BI__builtin_ia32_vpopcntd_512:
10495 case X86::BI__builtin_ia32_vpopcntq_512:
10496 case X86::BI__builtin_ia32_vpopcntw_512: {
10497 llvm::Type *ResultType = ConvertType(E->getType());
10498 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
10499 return Builder.CreateCall(F, Ops);
10500 }
10501 case X86::BI__builtin_ia32_cvtmask2b128:
10502 case X86::BI__builtin_ia32_cvtmask2b256:
10503 case X86::BI__builtin_ia32_cvtmask2b512:
10504 case X86::BI__builtin_ia32_cvtmask2w128:
10505 case X86::BI__builtin_ia32_cvtmask2w256:
10506 case X86::BI__builtin_ia32_cvtmask2w512:
10507 case X86::BI__builtin_ia32_cvtmask2d128:
10508 case X86::BI__builtin_ia32_cvtmask2d256:
10509 case X86::BI__builtin_ia32_cvtmask2d512:
10510 case X86::BI__builtin_ia32_cvtmask2q128:
10511 case X86::BI__builtin_ia32_cvtmask2q256:
10512 case X86::BI__builtin_ia32_cvtmask2q512:
10513 return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
10514
10515 case X86::BI__builtin_ia32_cvtb2mask128:
10516 case X86::BI__builtin_ia32_cvtb2mask256:
10517 case X86::BI__builtin_ia32_cvtb2mask512:
10518 case X86::BI__builtin_ia32_cvtw2mask128:
10519 case X86::BI__builtin_ia32_cvtw2mask256:
10520 case X86::BI__builtin_ia32_cvtw2mask512:
10521 case X86::BI__builtin_ia32_cvtd2mask128:
10522 case X86::BI__builtin_ia32_cvtd2mask256:
10523 case X86::BI__builtin_ia32_cvtd2mask512:
10524 case X86::BI__builtin_ia32_cvtq2mask128:
10525 case X86::BI__builtin_ia32_cvtq2mask256:
10526 case X86::BI__builtin_ia32_cvtq2mask512:
10527 return EmitX86ConvertToMask(*this, Ops[0]);
10528
10529 case X86::BI__builtin_ia32_cvtdq2ps512_mask:
10530 case X86::BI__builtin_ia32_cvtqq2ps512_mask:
10531 case X86::BI__builtin_ia32_cvtqq2pd512_mask:
10532 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
10533 case X86::BI__builtin_ia32_cvtudq2ps512_mask:
10534 case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
10535 case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
10536 return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
10537
10538 case X86::BI__builtin_ia32_vfmaddss3:
10539 case X86::BI__builtin_ia32_vfmaddsd3:
10540 case X86::BI__builtin_ia32_vfmaddss3_mask:
10541 case X86::BI__builtin_ia32_vfmaddsd3_mask:
10542 return EmitScalarFMAExpr(*this, Ops, Ops[0]);
10543 case X86::BI__builtin_ia32_vfmaddss:
10544 case X86::BI__builtin_ia32_vfmaddsd:
10545 return EmitScalarFMAExpr(*this, Ops,
10546 Constant::getNullValue(Ops[0]->getType()));
10547 case X86::BI__builtin_ia32_vfmaddss3_maskz:
10548 case X86::BI__builtin_ia32_vfmaddsd3_maskz:
10549 return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
10550 case X86::BI__builtin_ia32_vfmaddss3_mask3:
10551 case X86::BI__builtin_ia32_vfmaddsd3_mask3:
10552 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
10553 case X86::BI__builtin_ia32_vfmsubss3_mask3:
10554 case X86::BI__builtin_ia32_vfmsubsd3_mask3:
10555 return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
10556 /*NegAcc*/true);
10557 case X86::BI__builtin_ia32_vfmaddps:
10558 case X86::BI__builtin_ia32_vfmaddpd:
10559 case X86::BI__builtin_ia32_vfmaddps256:
10560 case X86::BI__builtin_ia32_vfmaddpd256:
10561 case X86::BI__builtin_ia32_vfmaddps512_mask:
10562 case X86::BI__builtin_ia32_vfmaddps512_maskz:
10563 case X86::BI__builtin_ia32_vfmaddps512_mask3:
10564 case X86::BI__builtin_ia32_vfmsubps512_mask3:
10565 case X86::BI__builtin_ia32_vfmaddpd512_mask:
10566 case X86::BI__builtin_ia32_vfmaddpd512_maskz:
10567 case X86::BI__builtin_ia32_vfmaddpd512_mask3:
10568 case X86::BI__builtin_ia32_vfmsubpd512_mask3:
10569 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
10570 case X86::BI__builtin_ia32_vfmaddsubps:
10571 case X86::BI__builtin_ia32_vfmaddsubpd:
10572 case X86::BI__builtin_ia32_vfmaddsubps256:
10573 case X86::BI__builtin_ia32_vfmaddsubpd256:
10574 case X86::BI__builtin_ia32_vfmaddsubps512_mask:
10575 case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
10576 case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
10577 case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
10578 case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
10579 case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
10580 case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
10581 case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
10582 return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
10583
10584 case X86::BI__builtin_ia32_movdqa32store128_mask:
10585 case X86::BI__builtin_ia32_movdqa64store128_mask:
10586 case X86::BI__builtin_ia32_storeaps128_mask:
10587 case X86::BI__builtin_ia32_storeapd128_mask:
10588 case X86::BI__builtin_ia32_movdqa32store256_mask:
10589 case X86::BI__builtin_ia32_movdqa64store256_mask:
10590 case X86::BI__builtin_ia32_storeaps256_mask:
10591 case X86::BI__builtin_ia32_storeapd256_mask:
10592 case X86::BI__builtin_ia32_movdqa32store512_mask:
10593 case X86::BI__builtin_ia32_movdqa64store512_mask:
10594 case X86::BI__builtin_ia32_storeaps512_mask:
10595 case X86::BI__builtin_ia32_storeapd512_mask: {
10596 unsigned Align =
10597 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
10598 return EmitX86MaskedStore(*this, Ops, Align);
10599 }
10600 case X86::BI__builtin_ia32_loadups128_mask:
10601 case X86::BI__builtin_ia32_loadups256_mask:
10602 case X86::BI__builtin_ia32_loadups512_mask:
10603 case X86::BI__builtin_ia32_loadupd128_mask:
10604 case X86::BI__builtin_ia32_loadupd256_mask:
10605 case X86::BI__builtin_ia32_loadupd512_mask:
10606 case X86::BI__builtin_ia32_loaddquqi128_mask:
10607 case X86::BI__builtin_ia32_loaddquqi256_mask:
10608 case X86::BI__builtin_ia32_loaddquqi512_mask:
10609 case X86::BI__builtin_ia32_loaddquhi128_mask:
10610 case X86::BI__builtin_ia32_loaddquhi256_mask:
10611 case X86::BI__builtin_ia32_loaddquhi512_mask:
10612 case X86::BI__builtin_ia32_loaddqusi128_mask:
10613 case X86::BI__builtin_ia32_loaddqusi256_mask:
10614 case X86::BI__builtin_ia32_loaddqusi512_mask:
10615 case X86::BI__builtin_ia32_loaddqudi128_mask:
10616 case X86::BI__builtin_ia32_loaddqudi256_mask:
10617 case X86::BI__builtin_ia32_loaddqudi512_mask:
10618 return EmitX86MaskedLoad(*this, Ops, 1);
10619
10620 case X86::BI__builtin_ia32_loadss128_mask:
10621 case X86::BI__builtin_ia32_loadsd128_mask:
10622 return EmitX86MaskedLoad(*this, Ops, 1);
10623
10624 case X86::BI__builtin_ia32_loadaps128_mask:
10625 case X86::BI__builtin_ia32_loadaps256_mask:
10626 case X86::BI__builtin_ia32_loadaps512_mask:
10627 case X86::BI__builtin_ia32_loadapd128_mask:
10628 case X86::BI__builtin_ia32_loadapd256_mask:
10629 case X86::BI__builtin_ia32_loadapd512_mask:
10630 case X86::BI__builtin_ia32_movdqa32load128_mask:
10631 case X86::BI__builtin_ia32_movdqa32load256_mask:
10632 case X86::BI__builtin_ia32_movdqa32load512_mask:
10633 case X86::BI__builtin_ia32_movdqa64load128_mask:
10634 case X86::BI__builtin_ia32_movdqa64load256_mask:
10635 case X86::BI__builtin_ia32_movdqa64load512_mask: {
10636 unsigned Align =
10637 getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
10638 return EmitX86MaskedLoad(*this, Ops, Align);
10639 }
10640
10641 case X86::BI__builtin_ia32_expandloaddf128_mask:
10642 case X86::BI__builtin_ia32_expandloaddf256_mask:
10643 case X86::BI__builtin_ia32_expandloaddf512_mask:
10644 case X86::BI__builtin_ia32_expandloadsf128_mask:
10645 case X86::BI__builtin_ia32_expandloadsf256_mask:
10646 case X86::BI__builtin_ia32_expandloadsf512_mask:
10647 case X86::BI__builtin_ia32_expandloaddi128_mask:
10648 case X86::BI__builtin_ia32_expandloaddi256_mask:
10649 case X86::BI__builtin_ia32_expandloaddi512_mask:
10650 case X86::BI__builtin_ia32_expandloadsi128_mask:
10651 case X86::BI__builtin_ia32_expandloadsi256_mask:
10652 case X86::BI__builtin_ia32_expandloadsi512_mask:
10653 case X86::BI__builtin_ia32_expandloadhi128_mask:
10654 case X86::BI__builtin_ia32_expandloadhi256_mask:
10655 case X86::BI__builtin_ia32_expandloadhi512_mask:
10656 case X86::BI__builtin_ia32_expandloadqi128_mask:
10657 case X86::BI__builtin_ia32_expandloadqi256_mask:
10658 case X86::BI__builtin_ia32_expandloadqi512_mask:
10659 return EmitX86ExpandLoad(*this, Ops);
10660
10661 case X86::BI__builtin_ia32_compressstoredf128_mask:
10662 case X86::BI__builtin_ia32_compressstoredf256_mask:
10663 case X86::BI__builtin_ia32_compressstoredf512_mask:
10664 case X86::BI__builtin_ia32_compressstoresf128_mask:
10665 case X86::BI__builtin_ia32_compressstoresf256_mask:
10666 case X86::BI__builtin_ia32_compressstoresf512_mask:
10667 case X86::BI__builtin_ia32_compressstoredi128_mask:
10668 case X86::BI__builtin_ia32_compressstoredi256_mask:
10669 case X86::BI__builtin_ia32_compressstoredi512_mask:
10670 case X86::BI__builtin_ia32_compressstoresi128_mask:
10671 case X86::BI__builtin_ia32_compressstoresi256_mask:
10672 case X86::BI__builtin_ia32_compressstoresi512_mask:
10673 case X86::BI__builtin_ia32_compressstorehi128_mask:
10674 case X86::BI__builtin_ia32_compressstorehi256_mask:
10675 case X86::BI__builtin_ia32_compressstorehi512_mask:
10676 case X86::BI__builtin_ia32_compressstoreqi128_mask:
10677 case X86::BI__builtin_ia32_compressstoreqi256_mask:
10678 case X86::BI__builtin_ia32_compressstoreqi512_mask:
10679 return EmitX86CompressStore(*this, Ops);
10680
10681 case X86::BI__builtin_ia32_expanddf128_mask:
10682 case X86::BI__builtin_ia32_expanddf256_mask:
10683 case X86::BI__builtin_ia32_expanddf512_mask:
10684 case X86::BI__builtin_ia32_expandsf128_mask:
10685 case X86::BI__builtin_ia32_expandsf256_mask:
10686 case X86::BI__builtin_ia32_expandsf512_mask:
10687 case X86::BI__builtin_ia32_expanddi128_mask:
10688 case X86::BI__builtin_ia32_expanddi256_mask:
10689 case X86::BI__builtin_ia32_expanddi512_mask:
10690 case X86::BI__builtin_ia32_expandsi128_mask:
10691 case X86::BI__builtin_ia32_expandsi256_mask:
10692 case X86::BI__builtin_ia32_expandsi512_mask:
10693 case X86::BI__builtin_ia32_expandhi128_mask:
10694 case X86::BI__builtin_ia32_expandhi256_mask:
10695 case X86::BI__builtin_ia32_expandhi512_mask:
10696 case X86::BI__builtin_ia32_expandqi128_mask:
10697 case X86::BI__builtin_ia32_expandqi256_mask:
10698 case X86::BI__builtin_ia32_expandqi512_mask:
10699 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
10700
10701 case X86::BI__builtin_ia32_compressdf128_mask:
10702 case X86::BI__builtin_ia32_compressdf256_mask:
10703 case X86::BI__builtin_ia32_compressdf512_mask:
10704 case X86::BI__builtin_ia32_compresssf128_mask:
10705 case X86::BI__builtin_ia32_compresssf256_mask:
10706 case X86::BI__builtin_ia32_compresssf512_mask:
10707 case X86::BI__builtin_ia32_compressdi128_mask:
10708 case X86::BI__builtin_ia32_compressdi256_mask:
10709 case X86::BI__builtin_ia32_compressdi512_mask:
10710 case X86::BI__builtin_ia32_compresssi128_mask:
10711 case X86::BI__builtin_ia32_compresssi256_mask:
10712 case X86::BI__builtin_ia32_compresssi512_mask:
10713 case X86::BI__builtin_ia32_compresshi128_mask:
10714 case X86::BI__builtin_ia32_compresshi256_mask:
10715 case X86::BI__builtin_ia32_compresshi512_mask:
10716 case X86::BI__builtin_ia32_compressqi128_mask:
10717 case X86::BI__builtin_ia32_compressqi256_mask:
10718 case X86::BI__builtin_ia32_compressqi512_mask:
10719 return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
10720
10721 case X86::BI__builtin_ia32_gather3div2df:
10722 case X86::BI__builtin_ia32_gather3div2di:
10723 case X86::BI__builtin_ia32_gather3div4df:
10724 case X86::BI__builtin_ia32_gather3div4di:
10725 case X86::BI__builtin_ia32_gather3div4sf:
10726 case X86::BI__builtin_ia32_gather3div4si:
10727 case X86::BI__builtin_ia32_gather3div8sf:
10728 case X86::BI__builtin_ia32_gather3div8si:
10729 case X86::BI__builtin_ia32_gather3siv2df:
10730 case X86::BI__builtin_ia32_gather3siv2di:
10731 case X86::BI__builtin_ia32_gather3siv4df:
10732 case X86::BI__builtin_ia32_gather3siv4di:
10733 case X86::BI__builtin_ia32_gather3siv4sf:
10734 case X86::BI__builtin_ia32_gather3siv4si:
10735 case X86::BI__builtin_ia32_gather3siv8sf:
10736 case X86::BI__builtin_ia32_gather3siv8si:
10737 case X86::BI__builtin_ia32_gathersiv8df:
10738 case X86::BI__builtin_ia32_gathersiv16sf:
10739 case X86::BI__builtin_ia32_gatherdiv8df:
10740 case X86::BI__builtin_ia32_gatherdiv16sf:
10741 case X86::BI__builtin_ia32_gathersiv8di:
10742 case X86::BI__builtin_ia32_gathersiv16si:
10743 case X86::BI__builtin_ia32_gatherdiv8di:
10744 case X86::BI__builtin_ia32_gatherdiv16si: {
10745 Intrinsic::ID IID;
10746 switch (BuiltinID) {
10747 default: llvm_unreachable("Unexpected builtin");
10748 case X86::BI__builtin_ia32_gather3div2df:
10749 IID = Intrinsic::x86_avx512_mask_gather3div2_df;
10750 break;
10751 case X86::BI__builtin_ia32_gather3div2di:
10752 IID = Intrinsic::x86_avx512_mask_gather3div2_di;
10753 break;
10754 case X86::BI__builtin_ia32_gather3div4df:
10755 IID = Intrinsic::x86_avx512_mask_gather3div4_df;
10756 break;
10757 case X86::BI__builtin_ia32_gather3div4di:
10758 IID = Intrinsic::x86_avx512_mask_gather3div4_di;
10759 break;
10760 case X86::BI__builtin_ia32_gather3div4sf:
10761 IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
10762 break;
10763 case X86::BI__builtin_ia32_gather3div4si:
10764 IID = Intrinsic::x86_avx512_mask_gather3div4_si;
10765 break;
10766 case X86::BI__builtin_ia32_gather3div8sf:
10767 IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
10768 break;
10769 case X86::BI__builtin_ia32_gather3div8si:
10770 IID = Intrinsic::x86_avx512_mask_gather3div8_si;
10771 break;
10772 case X86::BI__builtin_ia32_gather3siv2df:
10773 IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
10774 break;
10775 case X86::BI__builtin_ia32_gather3siv2di:
10776 IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
10777 break;
10778 case X86::BI__builtin_ia32_gather3siv4df:
10779 IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
10780 break;
10781 case X86::BI__builtin_ia32_gather3siv4di:
10782 IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
10783 break;
10784 case X86::BI__builtin_ia32_gather3siv4sf:
10785 IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
10786 break;
10787 case X86::BI__builtin_ia32_gather3siv4si:
10788 IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
10789 break;
10790 case X86::BI__builtin_ia32_gather3siv8sf:
10791 IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
10792 break;
10793 case X86::BI__builtin_ia32_gather3siv8si:
10794 IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
10795 break;
10796 case X86::BI__builtin_ia32_gathersiv8df:
10797 IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
10798 break;
10799 case X86::BI__builtin_ia32_gathersiv16sf:
10800 IID = Intrinsic::x86_avx512_mask_gather_dps_512;
10801 break;
10802 case X86::BI__builtin_ia32_gatherdiv8df:
10803 IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
10804 break;
10805 case X86::BI__builtin_ia32_gatherdiv16sf:
10806 IID = Intrinsic::x86_avx512_mask_gather_qps_512;
10807 break;
10808 case X86::BI__builtin_ia32_gathersiv8di:
10809 IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
10810 break;
10811 case X86::BI__builtin_ia32_gathersiv16si:
10812 IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
10813 break;
10814 case X86::BI__builtin_ia32_gatherdiv8di:
10815 IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
10816 break;
10817 case X86::BI__builtin_ia32_gatherdiv16si:
10818 IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
10819 break;
10820 }
10821
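  // The mask may be wider than the number of elements actually gathered;
  // narrow it to min(#data elements, #index elements) before the call.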
10822 unsigned MinElts = std::min(Ops[0]->getType()->getVectorNumElements(),
10823 Ops[2]->getType()->getVectorNumElements());
10824 Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
10825 Function *Intr = CGM.getIntrinsic(IID);
10826 return Builder.CreateCall(Intr, Ops);
10827 }
10828
10829 case X86::BI__builtin_ia32_scattersiv8df:
10830 case X86::BI__builtin_ia32_scattersiv16sf:
10831 case X86::BI__builtin_ia32_scatterdiv8df:
10832 case X86::BI__builtin_ia32_scatterdiv16sf:
10833 case X86::BI__builtin_ia32_scattersiv8di:
10834 case X86::BI__builtin_ia32_scattersiv16si:
10835 case X86::BI__builtin_ia32_scatterdiv8di:
10836 case X86::BI__builtin_ia32_scatterdiv16si:
10837 case X86::BI__builtin_ia32_scatterdiv2df:
10838 case X86::BI__builtin_ia32_scatterdiv2di:
10839 case X86::BI__builtin_ia32_scatterdiv4df:
10840 case X86::BI__builtin_ia32_scatterdiv4di:
10841 case X86::BI__builtin_ia32_scatterdiv4sf:
10842 case X86::BI__builtin_ia32_scatterdiv4si:
10843 case X86::BI__builtin_ia32_scatterdiv8sf:
10844 case X86::BI__builtin_ia32_scatterdiv8si:
10845 case X86::BI__builtin_ia32_scattersiv2df:
10846 case X86::BI__builtin_ia32_scattersiv2di:
10847 case X86::BI__builtin_ia32_scattersiv4df:
10848 case X86::BI__builtin_ia32_scattersiv4di:
10849 case X86::BI__builtin_ia32_scattersiv4sf:
10850 case X86::BI__builtin_ia32_scattersiv4si:
10851 case X86::BI__builtin_ia32_scattersiv8sf:
10852 case X86::BI__builtin_ia32_scattersiv8si: {
10853 Intrinsic::ID IID;
10854 switch (BuiltinID) {
10855 default: llvm_unreachable("Unexpected builtin");
10856 case X86::BI__builtin_ia32_scattersiv8df:
10857 IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
10858 break;
10859 case X86::BI__builtin_ia32_scattersiv16sf:
10860 IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
10861 break;
10862 case X86::BI__builtin_ia32_scatterdiv8df:
10863 IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
10864 break;
10865 case X86::BI__builtin_ia32_scatterdiv16sf:
10866 IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
10867 break;
10868 case X86::BI__builtin_ia32_scattersiv8di:
10869 IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
10870 break;
10871 case X86::BI__builtin_ia32_scattersiv16si:
10872 IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
10873 break;
10874 case X86::BI__builtin_ia32_scatterdiv8di:
10875 IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
10876 break;
10877 case X86::BI__builtin_ia32_scatterdiv16si:
10878 IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
10879 break;
10880 case X86::BI__builtin_ia32_scatterdiv2df:
10881 IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
10882 break;
10883 case X86::BI__builtin_ia32_scatterdiv2di:
10884 IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
10885 break;
10886 case X86::BI__builtin_ia32_scatterdiv4df:
10887 IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
10888 break;
10889 case X86::BI__builtin_ia32_scatterdiv4di:
10890 IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
10891 break;
10892 case X86::BI__builtin_ia32_scatterdiv4sf:
10893 IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
10894 break;
10895 case X86::BI__builtin_ia32_scatterdiv4si:
10896 IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
10897 break;
10898 case X86::BI__builtin_ia32_scatterdiv8sf:
10899 IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
10900 break;
10901 case X86::BI__builtin_ia32_scatterdiv8si:
10902 IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
10903 break;
10904 case X86::BI__builtin_ia32_scattersiv2df:
10905 IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
10906 break;
10907 case X86::BI__builtin_ia32_scattersiv2di:
10908 IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
10909 break;
10910 case X86::BI__builtin_ia32_scattersiv4df:
10911 IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
10912 break;
10913 case X86::BI__builtin_ia32_scattersiv4di:
10914 IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
10915 break;
10916 case X86::BI__builtin_ia32_scattersiv4sf:
10917 IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
10918 break;
10919 case X86::BI__builtin_ia32_scattersiv4si:
10920 IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
10921 break;
10922 case X86::BI__builtin_ia32_scattersiv8sf:
10923 IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
10924 break;
10925 case X86::BI__builtin_ia32_scattersiv8si:
10926 IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
10927 break;
10928 }
10929
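  // As with gathers, narrow the mask to the number of elements actually
  // scattered before emitting the call.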
10930 unsigned MinElts = std::min(Ops[2]->getType()->getVectorNumElements(),
10931 Ops[3]->getType()->getVectorNumElements());
10932 Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
10933 Function *Intr = CGM.getIntrinsic(IID);
10934 return Builder.CreateCall(Intr, Ops);
10935 }
10936
10937 case X86::BI__builtin_ia32_storehps:
10938 case X86::BI__builtin_ia32_storelps: {
10939 llvm::Type *PtrTy = CGM.getPointerInDefaultAS(Int64Ty);
10940 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
10941
10942 // Cast the source value to <2 x i64>.
10943 Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
10944
10945 // Extract element 0 (storelps) or element 1 (storehps).
10946 unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
10947 Ops[1] = Builder.CreateExtractElement(Ops[1], Index, "extract");
10948
10949 // Cast the pointer to i64* and store the extracted element.
10950 Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
10951 return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
10952 }
10953 case X86::BI__builtin_ia32_vextractf128_pd256:
10954 case X86::BI__builtin_ia32_vextractf128_ps256:
10955 case X86::BI__builtin_ia32_vextractf128_si256:
10956 case X86::BI__builtin_ia32_extract128i256:
10957 case X86::BI__builtin_ia32_extractf64x4_mask:
10958 case X86::BI__builtin_ia32_extractf32x4_mask:
10959 case X86::BI__builtin_ia32_extracti64x4_mask:
10960 case X86::BI__builtin_ia32_extracti32x4_mask:
10961 case X86::BI__builtin_ia32_extractf32x8_mask:
10962 case X86::BI__builtin_ia32_extracti32x8_mask:
10963 case X86::BI__builtin_ia32_extractf32x4_256_mask:
10964 case X86::BI__builtin_ia32_extracti32x4_256_mask:
10965 case X86::BI__builtin_ia32_extractf64x2_256_mask:
10966 case X86::BI__builtin_ia32_extracti64x2_256_mask:
10967 case X86::BI__builtin_ia32_extractf64x2_512_mask:
10968 case X86::BI__builtin_ia32_extracti64x2_512_mask: {
10969 llvm::Type *DstTy = ConvertType(E->getType());
10970 unsigned NumElts = DstTy->getVectorNumElements();
10971 unsigned SrcNumElts = Ops[0]->getType()->getVectorNumElements();
10972 unsigned SubVectors = SrcNumElts / NumElts;
10973 unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
10974 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
10975 Index &= SubVectors - 1; // Remove any extra bits.
10976 Index *= NumElts;
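  // For example, __builtin_ia32_vextractf128_ps256 with Index = 1 on a
  // <8 x float> source computes Indices = <4, 5, 6, 7>, i.e. the high
  // 128-bit half.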
10977
10978 uint32_t Indices[16];
10979 for (unsigned i = 0; i != NumElts; ++i)
10980 Indices[i] = i + Index;
10981
10982 Value *Res = Builder.CreateShuffleVector(Ops[0],
10983 UndefValue::get(Ops[0]->getType()),
10984 makeArrayRef(Indices, NumElts),
10985 "extract");
10986
10987 if (Ops.size() == 4)
10988 Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
10989
10990 return Res;
10991 }
10992 case X86::BI__builtin_ia32_vinsertf128_pd256:
10993 case X86::BI__builtin_ia32_vinsertf128_ps256:
10994 case X86::BI__builtin_ia32_vinsertf128_si256:
10995 case X86::BI__builtin_ia32_insert128i256:
10996 case X86::BI__builtin_ia32_insertf64x4:
10997 case X86::BI__builtin_ia32_insertf32x4:
10998 case X86::BI__builtin_ia32_inserti64x4:
10999 case X86::BI__builtin_ia32_inserti32x4:
11000 case X86::BI__builtin_ia32_insertf32x8:
11001 case X86::BI__builtin_ia32_inserti32x8:
11002 case X86::BI__builtin_ia32_insertf32x4_256:
11003 case X86::BI__builtin_ia32_inserti32x4_256:
11004 case X86::BI__builtin_ia32_insertf64x2_256:
11005 case X86::BI__builtin_ia32_inserti64x2_256:
11006 case X86::BI__builtin_ia32_insertf64x2_512:
11007 case X86::BI__builtin_ia32_inserti64x2_512: {
11008 unsigned DstNumElts = Ops[0]->getType()->getVectorNumElements();
11009 unsigned SrcNumElts = Ops[1]->getType()->getVectorNumElements();
11010 unsigned SubVectors = DstNumElts / SrcNumElts;
11011 unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
11012 assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
11013 Index &= SubVectors - 1; // Remove any extra bits.
11014 Index *= SrcNumElts;
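  // For example, __builtin_ia32_vinsertf128_ps256 with Index = 1 first widens
  // the <4 x float> source to <8 x float>, then the final shuffle selects
  // <0, 1, 2, 3> from the destination and <8, 9, 10, 11> from the widened
  // source.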
11015
11016 uint32_t Indices[16];
11017 for (unsigned i = 0; i != DstNumElts; ++i)
11018 Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
11019
11020 Value *Op1 = Builder.CreateShuffleVector(Ops[1],
11021 UndefValue::get(Ops[1]->getType()),
11022 makeArrayRef(Indices, DstNumElts),
11023 "widen");
11024
11025 for (unsigned i = 0; i != DstNumElts; ++i) {
11026 if (i >= Index && i < (Index + SrcNumElts))
11027 Indices[i] = (i - Index) + DstNumElts;
11028 else
11029 Indices[i] = i;
11030 }
11031
11032 return Builder.CreateShuffleVector(Ops[0], Op1,
11033 makeArrayRef(Indices, DstNumElts),
11034 "insert");
11035 }
11036 case X86::BI__builtin_ia32_pmovqd512_mask:
11037 case X86::BI__builtin_ia32_pmovwb512_mask: {
11038 Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
11039 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
11040 }
11041 case X86::BI__builtin_ia32_pmovdb512_mask:
11042 case X86::BI__builtin_ia32_pmovdw512_mask:
11043 case X86::BI__builtin_ia32_pmovqw512_mask: {
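    // An all-ones mask makes the masking a no-op, so a plain truncate
    // suffices.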
11044 if (const auto *C = dyn_cast<Constant>(Ops[2]))
11045 if (C->isAllOnesValue())
11046 return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
11047
11048 Intrinsic::ID IID;
11049 switch (BuiltinID) {
11050 default: llvm_unreachable("Unsupported intrinsic!");
11051 case X86::BI__builtin_ia32_pmovdb512_mask:
11052 IID = Intrinsic::x86_avx512_mask_pmov_db_512;
11053 break;
11054 case X86::BI__builtin_ia32_pmovdw512_mask:
11055 IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
11056 break;
11057 case X86::BI__builtin_ia32_pmovqw512_mask:
11058 IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
11059 break;
11060 }
11061
11062 Function *Intr = CGM.getIntrinsic(IID);
11063 return Builder.CreateCall(Intr, Ops);
11064 }
11065 case X86::BI__builtin_ia32_pblendw128:
11066 case X86::BI__builtin_ia32_blendpd:
11067 case X86::BI__builtin_ia32_blendps:
11068 case X86::BI__builtin_ia32_blendpd256:
11069 case X86::BI__builtin_ia32_blendps256:
11070 case X86::BI__builtin_ia32_pblendw256:
11071 case X86::BI__builtin_ia32_pblendd128:
11072 case X86::BI__builtin_ia32_pblendd256: {
11073 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11074 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
11075
11076 uint32_t Indices[16];
11077 // If there are more than 8 elements, the 8-bit immediate is reused for
11078 // the upper elements, so wrap the bit index with (i % 8).
11079 for (unsigned i = 0; i != NumElts; ++i)
11080 Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
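    // For example, blendps with Imm = 0x5 on <4 x float> gives
    // Indices = <4, 1, 6, 3>: elements 0 and 2 come from Ops[1], the rest
    // from Ops[0].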
11081
11082 return Builder.CreateShuffleVector(Ops[0], Ops[1],
11083 makeArrayRef(Indices, NumElts),
11084 "blend");
11085 }
11086 case X86::BI__builtin_ia32_pshuflw:
11087 case X86::BI__builtin_ia32_pshuflw256:
11088 case X86::BI__builtin_ia32_pshuflw512: {
11089 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11090 llvm::Type *Ty = Ops[0]->getType();
11091 unsigned NumElts = Ty->getVectorNumElements();
11092
11093 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
11094 Imm = (Imm & 0xff) * 0x01010101;
11095
11096 uint32_t Indices[32];
11097 for (unsigned l = 0; l != NumElts; l += 8) {
11098 for (unsigned i = 0; i != 4; ++i) {
11099 Indices[l + i] = l + (Imm & 3);
11100 Imm >>= 2;
11101 }
11102 for (unsigned i = 4; i != 8; ++i)
11103 Indices[l + i] = l + i;
11104 }
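    // For example, pshuflw with Imm = 0x1B reverses the low four words of
    // each lane: Indices = <3, 2, 1, 0, 4, 5, 6, 7> for a <8 x i16>.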
11105
11106 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
11107 makeArrayRef(Indices, NumElts),
11108 "pshuflw");
11109 }
11110 case X86::BI__builtin_ia32_pshufhw:
11111 case X86::BI__builtin_ia32_pshufhw256:
11112 case X86::BI__builtin_ia32_pshufhw512: {
11113 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11114 llvm::Type *Ty = Ops[0]->getType();
11115 unsigned NumElts = Ty->getVectorNumElements();
11116
11117 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
11118 Imm = (Imm & 0xff) * 0x01010101;
11119
11120 uint32_t Indices[32];
11121 for (unsigned l = 0; l != NumElts; l += 8) {
11122 for (unsigned i = 0; i != 4; ++i)
11123 Indices[l + i] = l + i;
11124 for (unsigned i = 4; i != 8; ++i) {
11125 Indices[l + i] = l + 4 + (Imm & 3);
11126 Imm >>= 2;
11127 }
11128 }
11129
11130 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
11131 makeArrayRef(Indices, NumElts),
11132 "pshufhw");
11133 }
11134 case X86::BI__builtin_ia32_pshufd:
11135 case X86::BI__builtin_ia32_pshufd256:
11136 case X86::BI__builtin_ia32_pshufd512:
11137 case X86::BI__builtin_ia32_vpermilpd:
11138 case X86::BI__builtin_ia32_vpermilps:
11139 case X86::BI__builtin_ia32_vpermilpd256:
11140 case X86::BI__builtin_ia32_vpermilps256:
11141 case X86::BI__builtin_ia32_vpermilpd512:
11142 case X86::BI__builtin_ia32_vpermilps512: {
11143 uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11144 llvm::Type *Ty = Ops[0]->getType();
11145 unsigned NumElts = Ty->getVectorNumElements();
11146 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
11147 unsigned NumLaneElts = NumElts / NumLanes;
11148
11149 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
11150 Imm = (Imm & 0xff) * 0x01010101;
11151
11152 uint32_t Indices[16];
11153 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
11154 for (unsigned i = 0; i != NumLaneElts; ++i) {
11155 Indices[i + l] = (Imm % NumLaneElts) + l;
11156 Imm /= NumLaneElts;
11157 }
11158 }
11159
11160 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
11161 makeArrayRef(Indices, NumElts),
11162 "permil");
11163 }
11164 case X86::BI__builtin_ia32_shufpd:
11165 case X86::BI__builtin_ia32_shufpd256:
11166 case X86::BI__builtin_ia32_shufpd512:
11167 case X86::BI__builtin_ia32_shufps:
11168 case X86::BI__builtin_ia32_shufps256:
11169 case X86::BI__builtin_ia32_shufps512: {
11170 uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
11171 llvm::Type *Ty = Ops[0]->getType();
11172 unsigned NumElts = Ty->getVectorNumElements();
11173 unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
11174 unsigned NumLaneElts = NumElts / NumLanes;
11175
11176 // Splat the 8 bits of the immediate 4 times so the loop can wrap around.
11177 Imm = (Imm & 0xff) * 0x01010101;
11178
11179 uint32_t Indices[16];
11180 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
11181 for (unsigned i = 0; i != NumLaneElts; ++i) {
11182 unsigned Index = Imm % NumLaneElts;
11183 Imm /= NumLaneElts;
11184 if (i >= (NumLaneElts / 2))
11185 Index += NumElts;
11186 Indices[l + i] = l + Index;
11187 }
11188 }
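    // For example, shufps with Imm = 0x4E on <4 x float> gives
    // Indices = <2, 3, 4, 5>: the high half of Ops[0] followed by the low
    // half of Ops[1].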
11189
11190 return Builder.CreateShuffleVector(Ops[0], Ops[1],
11191 makeArrayRef(Indices, NumElts),
11192 "shufp");
11193 }
11194 case X86::BI__builtin_ia32_permdi256:
11195 case X86::BI__builtin_ia32_permdf256:
11196 case X86::BI__builtin_ia32_permdi512:
11197 case X86::BI__builtin_ia32_permdf512: {
11198 unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11199 llvm::Type *Ty = Ops[0]->getType();
11200 unsigned NumElts = Ty->getVectorNumElements();
11201
11202 // These intrinsics operate on 256-bit lanes of four 64-bit elements.
11203 uint32_t Indices[8];
11204 for (unsigned l = 0; l != NumElts; l += 4)
11205 for (unsigned i = 0; i != 4; ++i)
11206 Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
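  // For example, permq with Imm = 0x1B reverses each group of four elements:
  // Indices = <3, 2, 1, 0> for a <4 x i64>.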
11207
11208 return Builder.CreateShuffleVector(Ops[0], UndefValue::get(Ty),
11209 makeArrayRef(Indices, NumElts),
11210 "perm");
11211 }
11212 case X86::BI__builtin_ia32_palignr128:
11213 case X86::BI__builtin_ia32_palignr256:
11214 case X86::BI__builtin_ia32_palignr512: {
11215 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
11216
11217 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11218 assert(NumElts % 16 == 0);
11219
11220 // If palignr is shifting the pair of vectors more than the size of two
11221 // lanes, emit zero.
11222 if (ShiftVal >= 32)
11223 return llvm::Constant::getNullValue(ConvertType(E->getType()));
11224
11225 // If palignr is shifting the pair of input vectors more than one lane,
11226 // but less than two lanes, convert to shifting in zeroes.
11227 if (ShiftVal > 16) {
11228 ShiftVal -= 16;
11229 Ops[1] = Ops[0];
11230 Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
11231 }
11232
11233 uint32_t Indices[64];
11234 // 256/512-bit palignr operates on 128-bit lanes, so handle each lane separately.
11235 for (unsigned l = 0; l != NumElts; l += 16) {
11236 for (unsigned i = 0; i != 16; ++i) {
11237 unsigned Idx = ShiftVal + i;
11238 if (Idx >= 16)
11239 Idx += NumElts - 16; // End of lane, switch operand.
11240 Indices[l + i] = Idx + l;
11241 }
11242 }
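    // For example, a 128-bit palignr with ShiftVal = 4 computes
    // Indices = <4..19>: bytes 4-15 of Ops[1] followed by bytes 0-3 of
    // Ops[0].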
11243
11244 return Builder.CreateShuffleVector(Ops[1], Ops[0],
11245 makeArrayRef(Indices, NumElts),
11246 "palignr");
11247 }
11248 case X86::BI__builtin_ia32_alignd128:
11249 case X86::BI__builtin_ia32_alignd256:
11250 case X86::BI__builtin_ia32_alignd512:
11251 case X86::BI__builtin_ia32_alignq128:
11252 case X86::BI__builtin_ia32_alignq256:
11253 case X86::BI__builtin_ia32_alignq512: {
11254 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11255 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
11256
11257 // Mask the shift amount to width of two vectors.
11258 ShiftVal &= (2 * NumElts) - 1;
11259
11260 uint32_t Indices[16];
11261 for (unsigned i = 0; i != NumElts; ++i)
11262 Indices[i] = i + ShiftVal;
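    // For example, valignd with ShiftVal = 1 on <4 x i32> gives
    // Indices = <1, 2, 3, 4>: Ops[0]:Ops[1] concatenated and shifted right
    // by one element.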
11263
11264 return Builder.CreateShuffleVector(Ops[1], Ops[0],
11265 makeArrayRef(Indices, NumElts),
11266 "valign");
11267 }
11268 case X86::BI__builtin_ia32_shuf_f32x4_256:
11269 case X86::BI__builtin_ia32_shuf_f64x2_256:
11270 case X86::BI__builtin_ia32_shuf_i32x4_256:
11271 case X86::BI__builtin_ia32_shuf_i64x2_256:
11272 case X86::BI__builtin_ia32_shuf_f32x4:
11273 case X86::BI__builtin_ia32_shuf_f64x2:
11274 case X86::BI__builtin_ia32_shuf_i32x4:
11275 case X86::BI__builtin_ia32_shuf_i64x2: {
11276 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
11277 llvm::Type *Ty = Ops[0]->getType();
11278 unsigned NumElts = Ty->getVectorNumElements();
11279 unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
11280 unsigned NumLaneElts = NumElts / NumLanes;
11281
11282 uint32_t Indices[16];
11283 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
11284 unsigned Index = (Imm % NumLanes) * NumLaneElts;
11285 Imm /= NumLanes; // Discard the bits we just used.
11286 if (l >= (NumElts / 2))
11287 Index += NumElts; // Switch to other source.
11288 for (unsigned i = 0; i != NumLaneElts; ++i) {
11289 Indices[l + i] = Index + i;
11290 }
11291 }
11292
11293 return Builder.CreateShuffleVector(Ops[0], Ops[1],
11294 makeArrayRef(Indices, NumElts),
11295 "shuf");
11296 }
11297
11298 case X86::BI__builtin_ia32_vperm2f128_pd256:
11299 case X86::BI__builtin_ia32_vperm2f128_ps256:
11300 case X86::BI__builtin_ia32_vperm2f128_si256:
11301 case X86::BI__builtin_ia32_permti256: {
11302 unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
11303 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11304
11305 // Take a simple approach: the result has two lanes and a shuffle can
11306 // have two inputs, so reserve the first input for the first lane and
11307 // the second input for the second lane. This may create duplicate
11308 // sources, which the backend can deal with.
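    // For example, vperm2f128 with Imm = 0x21 selects the high lane of
    // Ops[0] for result lane 0 and the low lane of Ops[1] for result lane 1.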
11309
11310 Value *OutOps[2];
11311 uint32_t Indices[8];
11312 for (unsigned l = 0; l != 2; ++l) {
11313 // Determine the source for this lane.
11314 if (Imm & (1 << ((l * 4) + 3)))
11315 OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
11316 else if (Imm & (1 << ((l * 4) + 1)))
11317 OutOps[l] = Ops[1];
11318 else
11319 OutOps[l] = Ops[0];
11320
11321 for (unsigned i = 0; i != NumElts/2; ++i) {
11322 // Start with ith element of the source for this lane.
11323 unsigned Idx = (l * NumElts) + i;
11324 // If bit 0 of the immediate half is set, switch to the high half of
11325 // the source.
11326 if (Imm & (1 << (l * 4)))
11327 Idx += NumElts/2;
11328 Indices[(l * (NumElts/2)) + i] = Idx;
11329 }
11330 }
11331
11332 return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
11333 makeArrayRef(Indices, NumElts),
11334 "vperm");
11335 }
11336
11337 case X86::BI__builtin_ia32_pslldqi128_byteshift:
11338 case X86::BI__builtin_ia32_pslldqi256_byteshift:
11339 case X86::BI__builtin_ia32_pslldqi512_byteshift: {
11340 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11341 llvm::Type *ResultType = Ops[0]->getType();
11342 // The builtin operand type is vXi64, so multiply by 8 to get the byte count.
11343 unsigned NumElts = ResultType->getVectorNumElements() * 8;
11344
11345 // If pslldq is shifting the vector more than 15 bytes, emit zero.
11346 if (ShiftVal >= 16)
11347 return llvm::Constant::getNullValue(ResultType);
11348
11349 uint32_t Indices[64];
11350 // 256/512-bit pslldq operates on 128-bit lanes, so handle each lane separately.
11351 for (unsigned l = 0; l != NumElts; l += 16) {
11352 for (unsigned i = 0; i != 16; ++i) {
11353 unsigned Idx = NumElts + i - ShiftVal;
11354 if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
11355 Indices[l + i] = Idx + l;
11356 }
11357 }
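    // For example, pslldq by 3 bytes yields per-lane indices
    // <13, 14, 15, 16, ...>: three zeros from the Zero operand followed by
    // bytes 0-12 of the input.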
11358
11359 llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
11360 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
11361 Value *Zero = llvm::Constant::getNullValue(VecTy);
11362 Value *SV = Builder.CreateShuffleVector(Zero, Cast,
11363 makeArrayRef(Indices, NumElts),
11364 "pslldq");
11365 return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
11366 }
11367 case X86::BI__builtin_ia32_psrldqi128_byteshift:
11368 case X86::BI__builtin_ia32_psrldqi256_byteshift:
11369 case X86::BI__builtin_ia32_psrldqi512_byteshift: {
11370 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11371 llvm::Type *ResultType = Ops[0]->getType();
11372 // The builtin operand type is vXi64, so multiply by 8 to get the byte count.
11373 unsigned NumElts = ResultType->getVectorNumElements() * 8;
11374
11375 // If psrldq is shifting the vector more than 15 bytes, emit zero.
11376 if (ShiftVal >= 16)
11377 return llvm::Constant::getNullValue(ResultType);
11378
11379 uint32_t Indices[64];
11380 // 256/512-bit psrldq operates on 128-bit lanes, so handle each lane separately.
11381 for (unsigned l = 0; l != NumElts; l += 16) {
11382 for (unsigned i = 0; i != 16; ++i) {
11383 unsigned Idx = i + ShiftVal;
11384 if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
11385 Indices[l + i] = Idx + l;
11386 }
11387 }
11388
11389 llvm::Type *VecTy = llvm::VectorType::get(Int8Ty, NumElts);
11390 Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
11391 Value *Zero = llvm::Constant::getNullValue(VecTy);
11392 Value *SV = Builder.CreateShuffleVector(Cast, Zero,
11393 makeArrayRef(Indices, NumElts),
11394 "psrldq");
11395 return Builder.CreateBitCast(SV, ResultType, "cast");
11396 }
11397 case X86::BI__builtin_ia32_kshiftliqi:
11398 case X86::BI__builtin_ia32_kshiftlihi:
11399 case X86::BI__builtin_ia32_kshiftlisi:
11400 case X86::BI__builtin_ia32_kshiftlidi: {
11401 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11402 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11403
11404 if (ShiftVal >= NumElts)
11405 return llvm::Constant::getNullValue(Ops[0]->getType());
11406
11407 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
11408
11409 uint32_t Indices[64];
11410 for (unsigned i = 0; i != NumElts; ++i)
11411 Indices[i] = NumElts + i - ShiftVal;
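    // For example, kshiftl by 1 on an 8-bit mask gives
    // Indices = <7, 8, ..., 14>: one zero element followed by bits 0-6 of
    // the input.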
11412
11413 Value *Zero = llvm::Constant::getNullValue(In->getType());
11414 Value *SV = Builder.CreateShuffleVector(Zero, In,
11415 makeArrayRef(Indices, NumElts),
11416 "kshiftl");
11417 return Builder.CreateBitCast(SV, Ops[0]->getType());
11418 }
11419 case X86::BI__builtin_ia32_kshiftriqi:
11420 case X86::BI__builtin_ia32_kshiftrihi:
11421 case X86::BI__builtin_ia32_kshiftrisi:
11422 case X86::BI__builtin_ia32_kshiftridi: {
11423 unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
11424 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11425
11426 if (ShiftVal >= NumElts)
11427 return llvm::Constant::getNullValue(Ops[0]->getType());
11428
11429 Value *In = getMaskVecValue(*this, Ops[0], NumElts);
11430
11431 uint32_t Indices[64];
11432 for (unsigned i = 0; i != NumElts; ++i)
11433 Indices[i] = i + ShiftVal;
11434
11435 Value *Zero = llvm::Constant::getNullValue(In->getType());
11436 Value *SV = Builder.CreateShuffleVector(In, Zero,
11437 makeArrayRef(Indices, NumElts),
11438 "kshiftr");
11439 return Builder.CreateBitCast(SV, Ops[0]->getType());
11440 }
11441 case X86::BI__builtin_ia32_movnti:
11442 case X86::BI__builtin_ia32_movnti64:
11443 case X86::BI__builtin_ia32_movntsd:
11444 case X86::BI__builtin_ia32_movntss: {
11445 llvm::MDNode *Node = llvm::MDNode::get(
11446 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
11447
11448 Value *Ptr = Ops[0];
11449 Value *Src = Ops[1];
11450
11451 // Extract the 0th element of the source vector.
11452 if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
11453 BuiltinID == X86::BI__builtin_ia32_movntss)
11454 Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
11455
11456 // Convert the type of the pointer to a pointer to the stored type.
11457 Value *BC = Builder.CreateBitCast(
11458 Ptr, CGM.getPointerInDefaultAS(Src->getType()), "cast");
11459
11460 // Unaligned nontemporal store of the scalar value.
11461 StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
11462 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
11463 SI->setAlignment(1);
11464 return SI;
11465 }
11466 // Rotate is a special case of funnel shift in which the first two args are the same.
11467 case X86::BI__builtin_ia32_vprotb:
11468 case X86::BI__builtin_ia32_vprotw:
11469 case X86::BI__builtin_ia32_vprotd:
11470 case X86::BI__builtin_ia32_vprotq:
11471 case X86::BI__builtin_ia32_vprotbi:
11472 case X86::BI__builtin_ia32_vprotwi:
11473 case X86::BI__builtin_ia32_vprotdi:
11474 case X86::BI__builtin_ia32_vprotqi:
11475 case X86::BI__builtin_ia32_prold128:
11476 case X86::BI__builtin_ia32_prold256:
11477 case X86::BI__builtin_ia32_prold512:
11478 case X86::BI__builtin_ia32_prolq128:
11479 case X86::BI__builtin_ia32_prolq256:
11480 case X86::BI__builtin_ia32_prolq512:
11481 case X86::BI__builtin_ia32_prolvd128:
11482 case X86::BI__builtin_ia32_prolvd256:
11483 case X86::BI__builtin_ia32_prolvd512:
11484 case X86::BI__builtin_ia32_prolvq128:
11485 case X86::BI__builtin_ia32_prolvq256:
11486 case X86::BI__builtin_ia32_prolvq512:
11487 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
11488 case X86::BI__builtin_ia32_prord128:
11489 case X86::BI__builtin_ia32_prord256:
11490 case X86::BI__builtin_ia32_prord512:
11491 case X86::BI__builtin_ia32_prorq128:
11492 case X86::BI__builtin_ia32_prorq256:
11493 case X86::BI__builtin_ia32_prorq512:
11494 case X86::BI__builtin_ia32_prorvd128:
11495 case X86::BI__builtin_ia32_prorvd256:
11496 case X86::BI__builtin_ia32_prorvd512:
11497 case X86::BI__builtin_ia32_prorvq128:
11498 case X86::BI__builtin_ia32_prorvq256:
11499 case X86::BI__builtin_ia32_prorvq512:
11500 return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
11501 case X86::BI__builtin_ia32_selectb_128:
11502 case X86::BI__builtin_ia32_selectb_256:
11503 case X86::BI__builtin_ia32_selectb_512:
11504 case X86::BI__builtin_ia32_selectw_128:
11505 case X86::BI__builtin_ia32_selectw_256:
11506 case X86::BI__builtin_ia32_selectw_512:
11507 case X86::BI__builtin_ia32_selectd_128:
11508 case X86::BI__builtin_ia32_selectd_256:
11509 case X86::BI__builtin_ia32_selectd_512:
11510 case X86::BI__builtin_ia32_selectq_128:
11511 case X86::BI__builtin_ia32_selectq_256:
11512 case X86::BI__builtin_ia32_selectq_512:
11513 case X86::BI__builtin_ia32_selectps_128:
11514 case X86::BI__builtin_ia32_selectps_256:
11515 case X86::BI__builtin_ia32_selectps_512:
11516 case X86::BI__builtin_ia32_selectpd_128:
11517 case X86::BI__builtin_ia32_selectpd_256:
11518 case X86::BI__builtin_ia32_selectpd_512:
11519 return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
11520 case X86::BI__builtin_ia32_selectss_128:
11521 case X86::BI__builtin_ia32_selectsd_128: {
11522 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11523 Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11524 A = EmitX86ScalarSelect(*this, Ops[0], A, B);
11525 return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
11526 }
11527 case X86::BI__builtin_ia32_cmpb128_mask:
11528 case X86::BI__builtin_ia32_cmpb256_mask:
11529 case X86::BI__builtin_ia32_cmpb512_mask:
11530 case X86::BI__builtin_ia32_cmpw128_mask:
11531 case X86::BI__builtin_ia32_cmpw256_mask:
11532 case X86::BI__builtin_ia32_cmpw512_mask:
11533 case X86::BI__builtin_ia32_cmpd128_mask:
11534 case X86::BI__builtin_ia32_cmpd256_mask:
11535 case X86::BI__builtin_ia32_cmpd512_mask:
11536 case X86::BI__builtin_ia32_cmpq128_mask:
11537 case X86::BI__builtin_ia32_cmpq256_mask:
11538 case X86::BI__builtin_ia32_cmpq512_mask: {
11539 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11540 return EmitX86MaskedCompare(*this, CC, true, Ops);
11541 }
11542 case X86::BI__builtin_ia32_ucmpb128_mask:
11543 case X86::BI__builtin_ia32_ucmpb256_mask:
11544 case X86::BI__builtin_ia32_ucmpb512_mask:
11545 case X86::BI__builtin_ia32_ucmpw128_mask:
11546 case X86::BI__builtin_ia32_ucmpw256_mask:
11547 case X86::BI__builtin_ia32_ucmpw512_mask:
11548 case X86::BI__builtin_ia32_ucmpd128_mask:
11549 case X86::BI__builtin_ia32_ucmpd256_mask:
11550 case X86::BI__builtin_ia32_ucmpd512_mask:
11551 case X86::BI__builtin_ia32_ucmpq128_mask:
11552 case X86::BI__builtin_ia32_ucmpq256_mask:
11553 case X86::BI__builtin_ia32_ucmpq512_mask: {
11554 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
11555 return EmitX86MaskedCompare(*this, CC, false, Ops);
11556 }
11557 case X86::BI__builtin_ia32_vpcomb:
11558 case X86::BI__builtin_ia32_vpcomw:
11559 case X86::BI__builtin_ia32_vpcomd:
11560 case X86::BI__builtin_ia32_vpcomq:
11561 return EmitX86vpcom(*this, Ops, true);
11562 case X86::BI__builtin_ia32_vpcomub:
11563 case X86::BI__builtin_ia32_vpcomuw:
11564 case X86::BI__builtin_ia32_vpcomud:
11565 case X86::BI__builtin_ia32_vpcomuq:
11566 return EmitX86vpcom(*this, Ops, false);
11567
11568 case X86::BI__builtin_ia32_kortestcqi:
11569 case X86::BI__builtin_ia32_kortestchi:
11570 case X86::BI__builtin_ia32_kortestcsi:
11571 case X86::BI__builtin_ia32_kortestcdi: {
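    // kortestc sets CF when the OR of the two masks is all ones, so emit the
    // OR and compare it against an all-ones constant.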
11572 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
11573 Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
11574 Value *Cmp = Builder.CreateICmpEQ(Or, C);
11575 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
11576 }
11577 case X86::BI__builtin_ia32_kortestzqi:
11578 case X86::BI__builtin_ia32_kortestzhi:
11579 case X86::BI__builtin_ia32_kortestzsi:
11580 case X86::BI__builtin_ia32_kortestzdi: {
11581 Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
11582 Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
11583 Value *Cmp = Builder.CreateICmpEQ(Or, C);
11584 return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
11585 }
11586
11587 case X86::BI__builtin_ia32_ktestcqi:
11588 case X86::BI__builtin_ia32_ktestzqi:
11589 case X86::BI__builtin_ia32_ktestchi:
11590 case X86::BI__builtin_ia32_ktestzhi:
11591 case X86::BI__builtin_ia32_ktestcsi:
11592 case X86::BI__builtin_ia32_ktestzsi:
11593 case X86::BI__builtin_ia32_ktestcdi:
11594 case X86::BI__builtin_ia32_ktestzdi: {
11595 Intrinsic::ID IID;
11596 switch (BuiltinID) {
11597 default: llvm_unreachable("Unsupported intrinsic!");
11598 case X86::BI__builtin_ia32_ktestcqi:
11599 IID = Intrinsic::x86_avx512_ktestc_b;
11600 break;
11601 case X86::BI__builtin_ia32_ktestzqi:
11602 IID = Intrinsic::x86_avx512_ktestz_b;
11603 break;
11604 case X86::BI__builtin_ia32_ktestchi:
11605 IID = Intrinsic::x86_avx512_ktestc_w;
11606 break;
11607 case X86::BI__builtin_ia32_ktestzhi:
11608 IID = Intrinsic::x86_avx512_ktestz_w;
11609 break;
11610 case X86::BI__builtin_ia32_ktestcsi:
11611 IID = Intrinsic::x86_avx512_ktestc_d;
11612 break;
11613 case X86::BI__builtin_ia32_ktestzsi:
11614 IID = Intrinsic::x86_avx512_ktestz_d;
11615 break;
11616 case X86::BI__builtin_ia32_ktestcdi:
11617 IID = Intrinsic::x86_avx512_ktestc_q;
11618 break;
11619 case X86::BI__builtin_ia32_ktestzdi:
11620 IID = Intrinsic::x86_avx512_ktestz_q;
11621 break;
11622 }
11623
11624 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11625 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11626 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11627 Function *Intr = CGM.getIntrinsic(IID);
11628 return Builder.CreateCall(Intr, {LHS, RHS});
11629 }
11630
11631 case X86::BI__builtin_ia32_kaddqi:
11632 case X86::BI__builtin_ia32_kaddhi:
11633 case X86::BI__builtin_ia32_kaddsi:
11634 case X86::BI__builtin_ia32_kadddi: {
11635 Intrinsic::ID IID;
11636 switch (BuiltinID) {
11637 default: llvm_unreachable("Unsupported intrinsic!");
11638 case X86::BI__builtin_ia32_kaddqi:
11639 IID = Intrinsic::x86_avx512_kadd_b;
11640 break;
11641 case X86::BI__builtin_ia32_kaddhi:
11642 IID = Intrinsic::x86_avx512_kadd_w;
11643 break;
11644 case X86::BI__builtin_ia32_kaddsi:
11645 IID = Intrinsic::x86_avx512_kadd_d;
11646 break;
11647 case X86::BI__builtin_ia32_kadddi:
11648 IID = Intrinsic::x86_avx512_kadd_q;
11649 break;
11650 }
11651
11652 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11653 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11654 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11655 Function *Intr = CGM.getIntrinsic(IID);
11656 Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
11657 return Builder.CreateBitCast(Res, Ops[0]->getType());
11658 }
11659 case X86::BI__builtin_ia32_kandqi:
11660 case X86::BI__builtin_ia32_kandhi:
11661 case X86::BI__builtin_ia32_kandsi:
11662 case X86::BI__builtin_ia32_kanddi:
11663 return EmitX86MaskLogic(*this, Instruction::And, Ops);
11664 case X86::BI__builtin_ia32_kandnqi:
11665 case X86::BI__builtin_ia32_kandnhi:
11666 case X86::BI__builtin_ia32_kandnsi:
11667 case X86::BI__builtin_ia32_kandndi:
11668 return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
11669 case X86::BI__builtin_ia32_korqi:
11670 case X86::BI__builtin_ia32_korhi:
11671 case X86::BI__builtin_ia32_korsi:
11672 case X86::BI__builtin_ia32_kordi:
11673 return EmitX86MaskLogic(*this, Instruction::Or, Ops);
11674 case X86::BI__builtin_ia32_kxnorqi:
11675 case X86::BI__builtin_ia32_kxnorhi:
11676 case X86::BI__builtin_ia32_kxnorsi:
11677 case X86::BI__builtin_ia32_kxnordi:
11678 return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
11679 case X86::BI__builtin_ia32_kxorqi:
11680 case X86::BI__builtin_ia32_kxorhi:
11681 case X86::BI__builtin_ia32_kxorsi:
11682 case X86::BI__builtin_ia32_kxordi:
11683 return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
11684 case X86::BI__builtin_ia32_knotqi:
11685 case X86::BI__builtin_ia32_knothi:
11686 case X86::BI__builtin_ia32_knotsi:
11687 case X86::BI__builtin_ia32_knotdi: {
11688 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11689 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
11690 return Builder.CreateBitCast(Builder.CreateNot(Res),
11691 Ops[0]->getType());
11692 }
11693 case X86::BI__builtin_ia32_kmovb:
11694 case X86::BI__builtin_ia32_kmovw:
11695 case X86::BI__builtin_ia32_kmovd:
11696 case X86::BI__builtin_ia32_kmovq: {
11697 // Bitcast to vXi1 type and then back to integer. This gets the mask
11698 // register type into the IR, but might be optimized out depending on
11699 // what's around it.
11700 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11701 Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
11702 return Builder.CreateBitCast(Res, Ops[0]->getType());
11703 }
11704
11705 case X86::BI__builtin_ia32_kunpckdi:
11706 case X86::BI__builtin_ia32_kunpcksi:
11707 case X86::BI__builtin_ia32_kunpckhi: {
11708 unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11709 Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
11710 Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
11711 uint32_t Indices[64];
11712 for (unsigned i = 0; i != NumElts; ++i)
11713 Indices[i] = i;
11714
11715 // First extract half of each vector. This gives better codegen than
11716 // doing it in a single shuffle.
11717 LHS = Builder.CreateShuffleVector(LHS, LHS,
11718 makeArrayRef(Indices, NumElts / 2));
11719 RHS = Builder.CreateShuffleVector(RHS, RHS,
11720 makeArrayRef(Indices, NumElts / 2));
11721 // Concat the vectors.
11722 // NOTE: Operands are swapped to match the intrinsic definition.
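    // For the 16-bit variant (kunpckbw), Ops[1] supplies bits 0-7 of the
    // result and Ops[0] supplies bits 8-15.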
11723 Value *Res = Builder.CreateShuffleVector(RHS, LHS,
11724 makeArrayRef(Indices, NumElts));
11725 return Builder.CreateBitCast(Res, Ops[0]->getType());
11726 }
11727
11728 case X86::BI__builtin_ia32_vplzcntd_128:
11729 case X86::BI__builtin_ia32_vplzcntd_256:
11730 case X86::BI__builtin_ia32_vplzcntd_512:
11731 case X86::BI__builtin_ia32_vplzcntq_128:
11732 case X86::BI__builtin_ia32_vplzcntq_256:
11733 case X86::BI__builtin_ia32_vplzcntq_512: {
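    // Lower to the generic ctlz intrinsic; the false flag requests defined
    // behavior (the bit width) for a zero input.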
11734 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
11735 return Builder.CreateCall(F, {Ops[0],Builder.getInt1(false)});
11736 }
11737 case X86::BI__builtin_ia32_sqrtss:
11738 case X86::BI__builtin_ia32_sqrtsd: {
11739 Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
11740 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
11741 A = Builder.CreateCall(F, {A});
11742 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
11743 }
11744 case X86::BI__builtin_ia32_sqrtsd_round_mask:
11745 case X86::BI__builtin_ia32_sqrtss_round_mask: {
11746 unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
11747 // Lower to a plain sqrt only when the rounding mode is 4
11748 // (CUR_DIRECTION); otherwise keep the target-specific intrinsic.
11749 if (CC != 4) {
11750 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtsd_round_mask ?
11751 Intrinsic::x86_avx512_mask_sqrt_sd :
11752 Intrinsic::x86_avx512_mask_sqrt_ss;
11753 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
11754 }
11755 Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
11756 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
11757 A = Builder.CreateCall(F, A);
11758 Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
11759 A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
11760 return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
11761 }
11762 case X86::BI__builtin_ia32_sqrtpd256:
11763 case X86::BI__builtin_ia32_sqrtpd:
11764 case X86::BI__builtin_ia32_sqrtps256:
11765 case X86::BI__builtin_ia32_sqrtps:
11766 case X86::BI__builtin_ia32_sqrtps512:
11767 case X86::BI__builtin_ia32_sqrtpd512: {
11768 if (Ops.size() == 2) {
11769 unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
11770 // Lower to a plain sqrt only when the rounding mode is 4
11771 // (CUR_DIRECTION); otherwise keep the target-specific intrinsic.
11772 if (CC != 4) {
11773 Intrinsic::ID IID = BuiltinID == X86::BI__builtin_ia32_sqrtps512 ?
11774 Intrinsic::x86_avx512_sqrt_ps_512 :
11775 Intrinsic::x86_avx512_sqrt_pd_512;
11776 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
11777 }
11778 }
11779 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
11780 return Builder.CreateCall(F, Ops[0]);
11781 }
11782 case X86::BI__builtin_ia32_pabsb128:
11783 case X86::BI__builtin_ia32_pabsw128:
11784 case X86::BI__builtin_ia32_pabsd128:
11785 case X86::BI__builtin_ia32_pabsb256:
11786 case X86::BI__builtin_ia32_pabsw256:
11787 case X86::BI__builtin_ia32_pabsd256:
11788 case X86::BI__builtin_ia32_pabsq128:
11789 case X86::BI__builtin_ia32_pabsq256:
11790 case X86::BI__builtin_ia32_pabsb512:
11791 case X86::BI__builtin_ia32_pabsw512:
11792 case X86::BI__builtin_ia32_pabsd512:
11793 case X86::BI__builtin_ia32_pabsq512:
11794 return EmitX86Abs(*this, Ops);
11795
11796 case X86::BI__builtin_ia32_pmaxsb128:
11797 case X86::BI__builtin_ia32_pmaxsw128:
11798 case X86::BI__builtin_ia32_pmaxsd128:
11799 case X86::BI__builtin_ia32_pmaxsq128:
11800 case X86::BI__builtin_ia32_pmaxsb256:
11801 case X86::BI__builtin_ia32_pmaxsw256:
11802 case X86::BI__builtin_ia32_pmaxsd256:
11803 case X86::BI__builtin_ia32_pmaxsq256:
11804 case X86::BI__builtin_ia32_pmaxsb512:
11805 case X86::BI__builtin_ia32_pmaxsw512:
11806 case X86::BI__builtin_ia32_pmaxsd512:
11807 case X86::BI__builtin_ia32_pmaxsq512:
11808 return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
11809 case X86::BI__builtin_ia32_pmaxub128:
11810 case X86::BI__builtin_ia32_pmaxuw128:
11811 case X86::BI__builtin_ia32_pmaxud128:
11812 case X86::BI__builtin_ia32_pmaxuq128:
11813 case X86::BI__builtin_ia32_pmaxub256:
11814 case X86::BI__builtin_ia32_pmaxuw256:
11815 case X86::BI__builtin_ia32_pmaxud256:
11816 case X86::BI__builtin_ia32_pmaxuq256:
11817 case X86::BI__builtin_ia32_pmaxub512:
11818 case X86::BI__builtin_ia32_pmaxuw512:
11819 case X86::BI__builtin_ia32_pmaxud512:
11820 case X86::BI__builtin_ia32_pmaxuq512:
11821 return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
11822 case X86::BI__builtin_ia32_pminsb128:
11823 case X86::BI__builtin_ia32_pminsw128:
11824 case X86::BI__builtin_ia32_pminsd128:
11825 case X86::BI__builtin_ia32_pminsq128:
11826 case X86::BI__builtin_ia32_pminsb256:
11827 case X86::BI__builtin_ia32_pminsw256:
11828 case X86::BI__builtin_ia32_pminsd256:
11829 case X86::BI__builtin_ia32_pminsq256:
11830 case X86::BI__builtin_ia32_pminsb512:
11831 case X86::BI__builtin_ia32_pminsw512:
11832 case X86::BI__builtin_ia32_pminsd512:
11833 case X86::BI__builtin_ia32_pminsq512:
11834 return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
11835 case X86::BI__builtin_ia32_pminub128:
11836 case X86::BI__builtin_ia32_pminuw128:
11837 case X86::BI__builtin_ia32_pminud128:
11838 case X86::BI__builtin_ia32_pminuq128:
11839 case X86::BI__builtin_ia32_pminub256:
11840 case X86::BI__builtin_ia32_pminuw256:
11841 case X86::BI__builtin_ia32_pminud256:
11842 case X86::BI__builtin_ia32_pminuq256:
11843 case X86::BI__builtin_ia32_pminub512:
11844 case X86::BI__builtin_ia32_pminuw512:
11845 case X86::BI__builtin_ia32_pminud512:
11846 case X86::BI__builtin_ia32_pminuq512:
11847 return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
11848
11849 case X86::BI__builtin_ia32_pmuludq128:
11850 case X86::BI__builtin_ia32_pmuludq256:
11851 case X86::BI__builtin_ia32_pmuludq512:
11852 return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
11853
11854 case X86::BI__builtin_ia32_pmuldq128:
11855 case X86::BI__builtin_ia32_pmuldq256:
11856 case X86::BI__builtin_ia32_pmuldq512:
11857 return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
11858
11859 case X86::BI__builtin_ia32_pternlogd512_mask:
11860 case X86::BI__builtin_ia32_pternlogq512_mask:
11861 case X86::BI__builtin_ia32_pternlogd128_mask:
11862 case X86::BI__builtin_ia32_pternlogd256_mask:
11863 case X86::BI__builtin_ia32_pternlogq128_mask:
11864 case X86::BI__builtin_ia32_pternlogq256_mask:
11865 return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
11866
11867 case X86::BI__builtin_ia32_pternlogd512_maskz:
11868 case X86::BI__builtin_ia32_pternlogq512_maskz:
11869 case X86::BI__builtin_ia32_pternlogd128_maskz:
11870 case X86::BI__builtin_ia32_pternlogd256_maskz:
11871 case X86::BI__builtin_ia32_pternlogq128_maskz:
11872 case X86::BI__builtin_ia32_pternlogq256_maskz:
11873 return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
11874
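  // The double-shift builtins are funnel shifts and are lowered to the
  // generic fshl/fshr intrinsics.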
11875 case X86::BI__builtin_ia32_vpshldd128:
11876 case X86::BI__builtin_ia32_vpshldd256:
11877 case X86::BI__builtin_ia32_vpshldd512:
11878 case X86::BI__builtin_ia32_vpshldq128:
11879 case X86::BI__builtin_ia32_vpshldq256:
11880 case X86::BI__builtin_ia32_vpshldq512:
11881 case X86::BI__builtin_ia32_vpshldw128:
11882 case X86::BI__builtin_ia32_vpshldw256:
11883 case X86::BI__builtin_ia32_vpshldw512:
    return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], /*IsRight*/false);
11885
11886 case X86::BI__builtin_ia32_vpshrdd128:
11887 case X86::BI__builtin_ia32_vpshrdd256:
11888 case X86::BI__builtin_ia32_vpshrdd512:
11889 case X86::BI__builtin_ia32_vpshrdq128:
11890 case X86::BI__builtin_ia32_vpshrdq256:
11891 case X86::BI__builtin_ia32_vpshrdq512:
11892 case X86::BI__builtin_ia32_vpshrdw128:
11893 case X86::BI__builtin_ia32_vpshrdw256:
11894 case X86::BI__builtin_ia32_vpshrdw512:
11895 // Ops 0 and 1 are swapped.
    return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], /*IsRight*/true);
11897
11898 case X86::BI__builtin_ia32_vpshldvd128:
11899 case X86::BI__builtin_ia32_vpshldvd256:
11900 case X86::BI__builtin_ia32_vpshldvd512:
11901 case X86::BI__builtin_ia32_vpshldvq128:
11902 case X86::BI__builtin_ia32_vpshldvq256:
11903 case X86::BI__builtin_ia32_vpshldvq512:
11904 case X86::BI__builtin_ia32_vpshldvw128:
11905 case X86::BI__builtin_ia32_vpshldvw256:
11906 case X86::BI__builtin_ia32_vpshldvw512:
    return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], /*IsRight*/false);
11908
11909 case X86::BI__builtin_ia32_vpshrdvd128:
11910 case X86::BI__builtin_ia32_vpshrdvd256:
11911 case X86::BI__builtin_ia32_vpshrdvd512:
11912 case X86::BI__builtin_ia32_vpshrdvq128:
11913 case X86::BI__builtin_ia32_vpshrdvq256:
11914 case X86::BI__builtin_ia32_vpshrdvq512:
11915 case X86::BI__builtin_ia32_vpshrdvw128:
11916 case X86::BI__builtin_ia32_vpshrdvw256:
11917 case X86::BI__builtin_ia32_vpshrdvw512:
11918 // Ops 0 and 1 are swapped.
    return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], /*IsRight*/true);
11920
11921 // 3DNow!
11922 case X86::BI__builtin_ia32_pswapdsf:
11923 case X86::BI__builtin_ia32_pswapdsi: {
11924 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
11925 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
11926 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
11927 return Builder.CreateCall(F, Ops, "pswapd");
11928 }
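  // The rdrand/rdseed "step" builtins store the random value through the
  // pointer operand and return the carry flag: 1 on success, 0 on failure.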
11929 case X86::BI__builtin_ia32_rdrand16_step:
11930 case X86::BI__builtin_ia32_rdrand32_step:
11931 case X86::BI__builtin_ia32_rdrand64_step:
11932 case X86::BI__builtin_ia32_rdseed16_step:
11933 case X86::BI__builtin_ia32_rdseed32_step:
11934 case X86::BI__builtin_ia32_rdseed64_step: {
11935 Intrinsic::ID ID;
11936 switch (BuiltinID) {
11937 default: llvm_unreachable("Unsupported intrinsic!");
11938 case X86::BI__builtin_ia32_rdrand16_step:
11939 ID = Intrinsic::x86_rdrand_16;
11940 break;
11941 case X86::BI__builtin_ia32_rdrand32_step:
11942 ID = Intrinsic::x86_rdrand_32;
11943 break;
11944 case X86::BI__builtin_ia32_rdrand64_step:
11945 ID = Intrinsic::x86_rdrand_64;
11946 break;
11947 case X86::BI__builtin_ia32_rdseed16_step:
11948 ID = Intrinsic::x86_rdseed_16;
11949 break;
11950 case X86::BI__builtin_ia32_rdseed32_step:
11951 ID = Intrinsic::x86_rdseed_32;
11952 break;
11953 case X86::BI__builtin_ia32_rdseed64_step:
11954 ID = Intrinsic::x86_rdseed_64;
11955 break;
11956 }
11957
11958 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
11959 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
11960 Ops[0]);
11961 return Builder.CreateExtractValue(Call, 1);
11962 }
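  // The addcarry/subborrow intrinsics return { carry-out, result }; store
  // the result through the out-pointer operand and return the carry-out.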
11963 case X86::BI__builtin_ia32_addcarryx_u32:
11964 case X86::BI__builtin_ia32_addcarryx_u64:
11965 case X86::BI__builtin_ia32_subborrow_u32:
11966 case X86::BI__builtin_ia32_subborrow_u64: {
11967 Intrinsic::ID IID;
11968 switch (BuiltinID) {
11969 default: llvm_unreachable("Unsupported intrinsic!");
11970 case X86::BI__builtin_ia32_addcarryx_u32:
11971 IID = Intrinsic::x86_addcarry_32;
11972 break;
11973 case X86::BI__builtin_ia32_addcarryx_u64:
11974 IID = Intrinsic::x86_addcarry_64;
11975 break;
11976 case X86::BI__builtin_ia32_subborrow_u32:
11977 IID = Intrinsic::x86_subborrow_32;
11978 break;
11979 case X86::BI__builtin_ia32_subborrow_u64:
11980 IID = Intrinsic::x86_subborrow_64;
11981 break;
11982 }
11983
11984 Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
11985 { Ops[0], Ops[1], Ops[2] });
11986 Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
11987 Ops[3]);
11988 return Builder.CreateExtractValue(Call, 0);
11989 }
11990
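  // The fpclass builtins test each element against the set of FP classes
  // selected by the immediate and produce a mask; the incoming mask is
  // applied to the compare result.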
11991 case X86::BI__builtin_ia32_fpclassps128_mask:
11992 case X86::BI__builtin_ia32_fpclassps256_mask:
11993 case X86::BI__builtin_ia32_fpclassps512_mask:
11994 case X86::BI__builtin_ia32_fpclasspd128_mask:
11995 case X86::BI__builtin_ia32_fpclasspd256_mask:
11996 case X86::BI__builtin_ia32_fpclasspd512_mask: {
11997 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
11998 Value *MaskIn = Ops[2];
11999 Ops.erase(&Ops[2]);
12000
12001 Intrinsic::ID ID;
12002 switch (BuiltinID) {
12003 default: llvm_unreachable("Unsupported intrinsic!");
12004 case X86::BI__builtin_ia32_fpclassps128_mask:
12005 ID = Intrinsic::x86_avx512_fpclass_ps_128;
12006 break;
12007 case X86::BI__builtin_ia32_fpclassps256_mask:
12008 ID = Intrinsic::x86_avx512_fpclass_ps_256;
12009 break;
12010 case X86::BI__builtin_ia32_fpclassps512_mask:
12011 ID = Intrinsic::x86_avx512_fpclass_ps_512;
12012 break;
12013 case X86::BI__builtin_ia32_fpclasspd128_mask:
12014 ID = Intrinsic::x86_avx512_fpclass_pd_128;
12015 break;
12016 case X86::BI__builtin_ia32_fpclasspd256_mask:
12017 ID = Intrinsic::x86_avx512_fpclass_pd_256;
12018 break;
12019 case X86::BI__builtin_ia32_fpclasspd512_mask:
12020 ID = Intrinsic::x86_avx512_fpclass_pd_512;
12021 break;
12022 }
12023
12024 Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12025 return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
12026 }
12027
12028 case X86::BI__builtin_ia32_vp2intersect_q_512:
12029 case X86::BI__builtin_ia32_vp2intersect_q_256:
12030 case X86::BI__builtin_ia32_vp2intersect_q_128:
12031 case X86::BI__builtin_ia32_vp2intersect_d_512:
12032 case X86::BI__builtin_ia32_vp2intersect_d_256:
12033 case X86::BI__builtin_ia32_vp2intersect_d_128: {
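    // vp2intersect produces a pair of masks; convert each to the builtin's
    // mask representation and store them through the two output pointers.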
12034 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
12035 Intrinsic::ID ID;
12036
12037 switch (BuiltinID) {
12038 default: llvm_unreachable("Unsupported intrinsic!");
12039 case X86::BI__builtin_ia32_vp2intersect_q_512:
12040 ID = Intrinsic::x86_avx512_vp2intersect_q_512;
12041 break;
12042 case X86::BI__builtin_ia32_vp2intersect_q_256:
12043 ID = Intrinsic::x86_avx512_vp2intersect_q_256;
12044 break;
12045 case X86::BI__builtin_ia32_vp2intersect_q_128:
12046 ID = Intrinsic::x86_avx512_vp2intersect_q_128;
12047 break;
12048 case X86::BI__builtin_ia32_vp2intersect_d_512:
12049 ID = Intrinsic::x86_avx512_vp2intersect_d_512;
12050 break;
12051 case X86::BI__builtin_ia32_vp2intersect_d_256:
12052 ID = Intrinsic::x86_avx512_vp2intersect_d_256;
12053 break;
12054 case X86::BI__builtin_ia32_vp2intersect_d_128:
12055 ID = Intrinsic::x86_avx512_vp2intersect_d_128;
12056 break;
12057 }
12058
12059 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
12060 Value *Result = Builder.CreateExtractValue(Call, 0);
12061 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
12062 Value *Store = Builder.CreateDefaultAlignedStore(Result, Ops[2]);
12063
12064 Result = Builder.CreateExtractValue(Call, 1);
12065 Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
12066 Store = Builder.CreateDefaultAlignedStore(Result, Ops[3]);
12067 return Store;
12068 }
12069
12070 case X86::BI__builtin_ia32_vpmultishiftqb128:
12071 case X86::BI__builtin_ia32_vpmultishiftqb256:
12072 case X86::BI__builtin_ia32_vpmultishiftqb512: {
12073 Intrinsic::ID ID;
12074 switch (BuiltinID) {
12075 default: llvm_unreachable("Unsupported intrinsic!");
12076 case X86::BI__builtin_ia32_vpmultishiftqb128:
12077 ID = Intrinsic::x86_avx512_pmultishift_qb_128;
12078 break;
12079 case X86::BI__builtin_ia32_vpmultishiftqb256:
12080 ID = Intrinsic::x86_avx512_pmultishift_qb_256;
12081 break;
12082 case X86::BI__builtin_ia32_vpmultishiftqb512:
12083 ID = Intrinsic::x86_avx512_pmultishift_qb_512;
12084 break;
12085 }
12086
12087 return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12088 }
12089
12090 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
12091 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
12092 case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
12093 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
12094 Value *MaskIn = Ops[2];
12095 Ops.erase(&Ops[2]);
12096
12097 Intrinsic::ID ID;
12098 switch (BuiltinID) {
12099 default: llvm_unreachable("Unsupported intrinsic!");
12100 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
12101 ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
12102 break;
12103 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
12104 ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
12105 break;
12106 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
12107 ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
12108 break;
12109 }
12110
12111 Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12112 return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
12113 }
12114
  // Packed comparison intrinsics
12116 case X86::BI__builtin_ia32_cmpeqps:
12117 case X86::BI__builtin_ia32_cmpeqpd:
12118 return getVectorFCmpIR(CmpInst::FCMP_OEQ);
12119 case X86::BI__builtin_ia32_cmpltps:
12120 case X86::BI__builtin_ia32_cmpltpd:
12121 return getVectorFCmpIR(CmpInst::FCMP_OLT);
12122 case X86::BI__builtin_ia32_cmpleps:
12123 case X86::BI__builtin_ia32_cmplepd:
12124 return getVectorFCmpIR(CmpInst::FCMP_OLE);
12125 case X86::BI__builtin_ia32_cmpunordps:
12126 case X86::BI__builtin_ia32_cmpunordpd:
12127 return getVectorFCmpIR(CmpInst::FCMP_UNO);
12128 case X86::BI__builtin_ia32_cmpneqps:
12129 case X86::BI__builtin_ia32_cmpneqpd:
12130 return getVectorFCmpIR(CmpInst::FCMP_UNE);
12131 case X86::BI__builtin_ia32_cmpnltps:
12132 case X86::BI__builtin_ia32_cmpnltpd:
12133 return getVectorFCmpIR(CmpInst::FCMP_UGE);
12134 case X86::BI__builtin_ia32_cmpnleps:
12135 case X86::BI__builtin_ia32_cmpnlepd:
12136 return getVectorFCmpIR(CmpInst::FCMP_UGT);
12137 case X86::BI__builtin_ia32_cmpordps:
12138 case X86::BI__builtin_ia32_cmpordpd:
12139 return getVectorFCmpIR(CmpInst::FCMP_ORD);
12140 case X86::BI__builtin_ia32_cmpps:
12141 case X86::BI__builtin_ia32_cmpps256:
12142 case X86::BI__builtin_ia32_cmppd:
12143 case X86::BI__builtin_ia32_cmppd256:
12144 case X86::BI__builtin_ia32_cmpps128_mask:
12145 case X86::BI__builtin_ia32_cmpps256_mask:
12146 case X86::BI__builtin_ia32_cmpps512_mask:
12147 case X86::BI__builtin_ia32_cmppd128_mask:
12148 case X86::BI__builtin_ia32_cmppd256_mask:
12149 case X86::BI__builtin_ia32_cmppd512_mask: {
    // Lowering vector comparisons to fcmp instructions, while
    // ignoring the requested signaling behaviour and rounding mode.
    // This is only possible as long as FENV_ACCESS is not implemented.
    // See also: https://reviews.llvm.org/D45616
12155
    // The third argument is the comparison condition, an integer in the
    // range [0, 31].
12158 unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
12159
    // Lower to an IR fcmp instruction, ignoring the requested signaling
    // behaviour, e.g. both _CMP_GT_OS & _CMP_GT_OQ are translated to
    // FCMP_OGT.
12163 FCmpInst::Predicate Pred;
12164 switch (CC) {
12165 case 0x00: Pred = FCmpInst::FCMP_OEQ; break;
12166 case 0x01: Pred = FCmpInst::FCMP_OLT; break;
12167 case 0x02: Pred = FCmpInst::FCMP_OLE; break;
12168 case 0x03: Pred = FCmpInst::FCMP_UNO; break;
12169 case 0x04: Pred = FCmpInst::FCMP_UNE; break;
12170 case 0x05: Pred = FCmpInst::FCMP_UGE; break;
12171 case 0x06: Pred = FCmpInst::FCMP_UGT; break;
12172 case 0x07: Pred = FCmpInst::FCMP_ORD; break;
12173 case 0x08: Pred = FCmpInst::FCMP_UEQ; break;
12174 case 0x09: Pred = FCmpInst::FCMP_ULT; break;
12175 case 0x0a: Pred = FCmpInst::FCMP_ULE; break;
12176 case 0x0b: Pred = FCmpInst::FCMP_FALSE; break;
12177 case 0x0c: Pred = FCmpInst::FCMP_ONE; break;
12178 case 0x0d: Pred = FCmpInst::FCMP_OGE; break;
12179 case 0x0e: Pred = FCmpInst::FCMP_OGT; break;
12180 case 0x0f: Pred = FCmpInst::FCMP_TRUE; break;
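    // 0x10-0x1f are the signaling variants of 0x00-0x0f and map to the same
    // predicates once signaling is ignored.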
12181 case 0x10: Pred = FCmpInst::FCMP_OEQ; break;
12182 case 0x11: Pred = FCmpInst::FCMP_OLT; break;
12183 case 0x12: Pred = FCmpInst::FCMP_OLE; break;
12184 case 0x13: Pred = FCmpInst::FCMP_UNO; break;
12185 case 0x14: Pred = FCmpInst::FCMP_UNE; break;
12186 case 0x15: Pred = FCmpInst::FCMP_UGE; break;
12187 case 0x16: Pred = FCmpInst::FCMP_UGT; break;
12188 case 0x17: Pred = FCmpInst::FCMP_ORD; break;
12189 case 0x18: Pred = FCmpInst::FCMP_UEQ; break;
12190 case 0x19: Pred = FCmpInst::FCMP_ULT; break;
12191 case 0x1a: Pred = FCmpInst::FCMP_ULE; break;
12192 case 0x1b: Pred = FCmpInst::FCMP_FALSE; break;
12193 case 0x1c: Pred = FCmpInst::FCMP_ONE; break;
12194 case 0x1d: Pred = FCmpInst::FCMP_OGE; break;
12195 case 0x1e: Pred = FCmpInst::FCMP_OGT; break;
12196 case 0x1f: Pred = FCmpInst::FCMP_TRUE; break;
12197 default: llvm_unreachable("Unhandled CC");
12198 }
12199
    // Builtins without the _mask suffix return a vector of integers of the
    // same width as the input vectors.
12202 switch (BuiltinID) {
12203 case X86::BI__builtin_ia32_cmpps512_mask:
12204 case X86::BI__builtin_ia32_cmppd512_mask:
12205 case X86::BI__builtin_ia32_cmpps128_mask:
12206 case X86::BI__builtin_ia32_cmpps256_mask:
12207 case X86::BI__builtin_ia32_cmppd128_mask:
12208 case X86::BI__builtin_ia32_cmppd256_mask: {
12209 unsigned NumElts = Ops[0]->getType()->getVectorNumElements();
12210 Value *Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
12211 return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
12212 }
12213 default:
12214 return getVectorFCmpIR(Pred);
12215 }
12216 }
12217
12218 // SSE scalar comparison intrinsics
12219 case X86::BI__builtin_ia32_cmpeqss:
12220 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
12221 case X86::BI__builtin_ia32_cmpltss:
12222 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
12223 case X86::BI__builtin_ia32_cmpless:
12224 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
12225 case X86::BI__builtin_ia32_cmpunordss:
12226 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
12227 case X86::BI__builtin_ia32_cmpneqss:
12228 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
12229 case X86::BI__builtin_ia32_cmpnltss:
12230 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
12231 case X86::BI__builtin_ia32_cmpnless:
12232 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
12233 case X86::BI__builtin_ia32_cmpordss:
12234 return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
12235 case X86::BI__builtin_ia32_cmpeqsd:
12236 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
12237 case X86::BI__builtin_ia32_cmpltsd:
12238 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
12239 case X86::BI__builtin_ia32_cmplesd:
12240 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
12241 case X86::BI__builtin_ia32_cmpunordsd:
12242 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
12243 case X86::BI__builtin_ia32_cmpneqsd:
12244 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
12245 case X86::BI__builtin_ia32_cmpnltsd:
12246 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
12247 case X86::BI__builtin_ia32_cmpnlesd:
12248 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
12249 case X86::BI__builtin_ia32_cmpordsd:
12250 return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
12251
  // AVX512 bf16 intrinsics
12253 case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
12254 Ops[2] = getMaskVecValue(*this, Ops[2],
12255 Ops[0]->getType()->getVectorNumElements());
12256 Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
12257 return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
12258 }
12259 case X86::BI__builtin_ia32_cvtsbf162ss_32:
12260 return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
12261
12262 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
12263 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
12264 Intrinsic::ID IID;
12265 switch (BuiltinID) {
12266 default: llvm_unreachable("Unsupported intrinsic!");
12267 case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
12268 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
12269 break;
12270 case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
12271 IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
12272 break;
12273 }
12274 Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
12275 return EmitX86Select(*this, Ops[2], Res, Ops[1]);
12276 }
12277
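  // __emul/__emulu return the full 64-bit product of two 32-bit operands.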
12278 case X86::BI__emul:
12279 case X86::BI__emulu: {
12280 llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
12281 bool isSigned = (BuiltinID == X86::BI__emul);
12282 Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
12283 Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
12284 return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
12285 }
12286 case X86::BI__mulh:
12287 case X86::BI__umulh:
12288 case X86::BI_mul128:
12289 case X86::BI_umul128: {
12290 llvm::Type *ResType = ConvertType(E->getType());
12291 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
12292
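    // Compute the full 128-bit product. __mulh/__umulh return the high half;
    // _mul128/_umul128 store the high half and return the low half.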
12293 bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
12294 Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
12295 Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
12296
12297 Value *MulResult, *HigherBits;
12298 if (IsSigned) {
12299 MulResult = Builder.CreateNSWMul(LHS, RHS);
12300 HigherBits = Builder.CreateAShr(MulResult, 64);
12301 } else {
12302 MulResult = Builder.CreateNUWMul(LHS, RHS);
12303 HigherBits = Builder.CreateLShr(MulResult, 64);
12304 }
12305 HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
12306
12307 if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
12308 return HigherBits;
12309
12310 Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
12311 Builder.CreateStore(HigherBits, HighBitsAddress);
12312 return Builder.CreateIntCast(MulResult, ResType, IsSigned);
12313 }
12314
12315 case X86::BI__faststorefence: {
12316 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
12317 llvm::SyncScope::System);
12318 }
12319 case X86::BI__shiftleft128:
12320 case X86::BI__shiftright128: {
    // FIXME: Once fshl/fshr no longer add an unneeded 'and' and 'cmov', do
    // this:
12322 // llvm::Function *F = CGM.getIntrinsic(
12323 // BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
12324 // Int64Ty);
12325 // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
12326 // return Builder.CreateCall(F, Ops);
12327 llvm::Type *Int128Ty = Builder.getInt128Ty();
12328 Value *HighPart128 =
12329 Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64);
12330 Value *LowPart128 = Builder.CreateZExt(Ops[0], Int128Ty);
12331 Value *Val = Builder.CreateOr(HighPart128, LowPart128);
12332 Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
12333 llvm::ConstantInt::get(Int128Ty, 0x3f));
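    // For a left shift the result is the high half of the shifted value; for
    // a right shift it is the low half.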
12334 Value *Res;
12335 if (BuiltinID == X86::BI__shiftleft128)
12336 Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
12337 else
12338 Res = Builder.CreateLShr(Val, Amt);
12339 return Builder.CreateTrunc(Res, Int64Ty);
12340 }
12341 case X86::BI_ReadWriteBarrier:
12342 case X86::BI_ReadBarrier:
12343 case X86::BI_WriteBarrier: {
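    // These are compiler-only barriers; a single-thread fence stops the
    // compiler from reordering memory accesses across it without emitting a
    // machine fence instruction.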
12344 return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
12345 llvm::SyncScope::SingleThread);
12346 }
12347 case X86::BI_BitScanForward:
12348 case X86::BI_BitScanForward64:
12349 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
12350 case X86::BI_BitScanReverse:
12351 case X86::BI_BitScanReverse64:
12352 return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
12353
12354 case X86::BI_InterlockedAnd64:
12355 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
12356 case X86::BI_InterlockedExchange64:
12357 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
12358 case X86::BI_InterlockedExchangeAdd64:
12359 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
12360 case X86::BI_InterlockedExchangeSub64:
12361 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
12362 case X86::BI_InterlockedOr64:
12363 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
12364 case X86::BI_InterlockedXor64:
12365 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
12366 case X86::BI_InterlockedDecrement64:
12367 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
12368 case X86::BI_InterlockedIncrement64:
12369 return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
12370 case X86::BI_InterlockedCompareExchange128: {
    // InterlockedCompareExchange128 doesn't directly refer to 128-bit ints;
    // instead it takes pointers to 64-bit ints for Destination and
    // ComparandResult, and the exchange value is passed as two 64-bit ints
    // (high & low). The previous value is written to ComparandResult, and
    // success is returned.
12376
12377 llvm::Type *Int128Ty = Builder.getInt128Ty();
12378 llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
12379
12380 Value *Destination =
12381 Builder.CreateBitCast(Ops[0], Int128PtrTy);
12382 Value *ExchangeHigh128 = Builder.CreateZExt(Ops[1], Int128Ty);
12383 Value *ExchangeLow128 = Builder.CreateZExt(Ops[2], Int128Ty);
12384 Address ComparandResult(Builder.CreateBitCast(Ops[3], Int128PtrTy),
12385 getContext().toCharUnitsFromBits(128));
12386
12387 Value *Exchange = Builder.CreateOr(
12388 Builder.CreateShl(ExchangeHigh128, 64, "", false, false),
12389 ExchangeLow128);
12390
12391 Value *Comparand = Builder.CreateLoad(ComparandResult);
12392
12393 AtomicCmpXchgInst *CXI =
12394 Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
12395 AtomicOrdering::SequentiallyConsistent,
12396 AtomicOrdering::SequentiallyConsistent);
12397 CXI->setVolatile(true);
12398
12399 // Write the result back to the inout pointer.
12400 Builder.CreateStore(Builder.CreateExtractValue(CXI, 0), ComparandResult);
12401
12402 // Get the success boolean and zero extend it to i8.
12403 Value *Success = Builder.CreateExtractValue(CXI, 1);
12404 return Builder.CreateZExt(Success, ConvertType(E->getType()));
12405 }
12406
12407 case X86::BI_AddressOfReturnAddress: {
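    // When all pointers are capabilities (pure-capability CHERI), the
    // builtin must return a capability rather than a plain pointer.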
12408 auto *RetTy = CGM.getTarget().areAllPointersCapabilities()
12409 ? CGM.Int8CheriCapTy
12410 : CGM.Int8PtrTy;
12411 Function *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress, {RetTy});
12412 return Builder.CreateCall(F);
12413 }
12414 case X86::BI__stosb: {
    // We treat __stosb as a volatile memset - it may not generate a "rep
    // stosb" instruction, but it will create a memset that won't be
    // optimized away.
    return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], /*Align=*/1,
                                /*isVolatile=*/true);
12418 }
12419 case X86::BI__ud2:
    // llvm.trap lowers to a ud2a instruction on x86.
12421 return EmitTrapCall(Intrinsic::trap);
12422 case X86::BI__int2c: {
12423 // This syscall signals a driver assertion failure in x86 NT kernels.
12424 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
12425 llvm::InlineAsm *IA =
12426 llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*SideEffects=*/true);
12427 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
12428 getLLVMContext(), llvm::AttributeList::FunctionIndex,
12429 llvm::Attribute::NoReturn);
12430 llvm::CallInst *CI = Builder.CreateCall(IA);
12431 CI->setAttributes(NoReturnAttr);
12432 return CI;
12433 }
12434 case X86::BI__readfsbyte:
12435 case X86::BI__readfsword:
12436 case X86::BI__readfsdword:
12437 case X86::BI__readfsqword: {
12438 llvm::Type *IntTy = ConvertType(E->getType());
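    // On x86, LLVM address space 257 refers to the FS segment.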
12439 Value *Ptr =
12440 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
12441 LoadInst *Load = Builder.CreateAlignedLoad(
12442 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
12443 Load->setVolatile(true);
12444 return Load;
12445 }
12446 case X86::BI__readgsbyte:
12447 case X86::BI__readgsword:
12448 case X86::BI__readgsdword:
12449 case X86::BI__readgsqword: {
12450 llvm::Type *IntTy = ConvertType(E->getType());
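    // On x86, LLVM address space 256 refers to the GS segment.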
12451 Value *Ptr =
12452 Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
12453 LoadInst *Load = Builder.CreateAlignedLoad(
12454 IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
12455 Load->setVolatile(true);
12456 return Load;
12457 }
12458 case X86::BI__builtin_ia32_paddsb512:
12459 case X86::BI__builtin_ia32_paddsw512:
12460 case X86::BI__builtin_ia32_paddsb256:
12461 case X86::BI__builtin_ia32_paddsw256:
12462 case X86::BI__builtin_ia32_paddsb128:
12463 case X86::BI__builtin_ia32_paddsw128:
    return EmitX86AddSubSatExpr(*this, Ops, /*IsSigned*/ true,
                                /*IsAddition*/ true);
12465 case X86::BI__builtin_ia32_paddusb512:
12466 case X86::BI__builtin_ia32_paddusw512:
12467 case X86::BI__builtin_ia32_paddusb256:
12468 case X86::BI__builtin_ia32_paddusw256:
12469 case X86::BI__builtin_ia32_paddusb128:
12470 case X86::BI__builtin_ia32_paddusw128:
    return EmitX86AddSubSatExpr(*this, Ops, /*IsSigned*/ false,
                                /*IsAddition*/ true);
12472 case X86::BI__builtin_ia32_psubsb512:
12473 case X86::BI__builtin_ia32_psubsw512:
12474 case X86::BI__builtin_ia32_psubsb256:
12475 case X86::BI__builtin_ia32_psubsw256:
12476 case X86::BI__builtin_ia32_psubsb128:
12477 case X86::BI__builtin_ia32_psubsw128:
    return EmitX86AddSubSatExpr(*this, Ops, /*IsSigned*/ true,
                                /*IsAddition*/ false);
12479 case X86::BI__builtin_ia32_psubusb512:
12480 case X86::BI__builtin_ia32_psubusw512:
12481 case X86::BI__builtin_ia32_psubusb256:
12482 case X86::BI__builtin_ia32_psubusw256:
12483 case X86::BI__builtin_ia32_psubusb128:
12484 case X86::BI__builtin_ia32_psubusw128:
    return EmitX86AddSubSatExpr(*this, Ops, /*IsSigned*/ false,
                                /*IsAddition*/ false);
12486 }
12487}
12488
12489Value *CodeGenFunction::EmitMIPSBuiltinExpr(unsigned BuiltinID,
12490 const CallExpr *E) {
12491 switch (BuiltinID) {
12492 default: return nullptr;
12493
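  // The CHERI capability builtins map directly onto the corresponding
  // cheri.* intrinsics.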
12494 case Mips::BI__builtin_mips_cheri_get_cap_length:
12495 return Builder.CreateCall(
12496 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_length_get, SizeTy),
12497 {EmitScalarExpr(E->getArg(0))});
12498 case Mips::BI__builtin_mips_cheri_get_cap_base:
12499 return Builder.CreateCall(
12500 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_base_get, IntPtrTy),
12501 {EmitScalarExpr(E->getArg(0))});
12502 case Mips::BI__builtin_mips_cheri_and_cap_perms:
12503 return Builder.CreateCall(
12504 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_perms_and, SizeTy),
12505 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
12506 case Mips::BI__builtin_mips_cheri_get_cap_perms:
12507 return Builder.CreateCall(
12508 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_perms_get, SizeTy),
12509 {EmitScalarExpr(E->getArg(0))});
12510 case Mips::BI__builtin_mips_cheri_get_cap_type:
12511 return Builder.CreateCall(
12512 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_type_get, IntPtrTy),
12513 {EmitScalarExpr(E->getArg(0))});
12514 case Mips::BI__builtin_mips_cheri_check_perms:
12515 return Builder.CreateCall(
12516 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_perms_check, SizeTy),
12517 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
12518 case Mips::BI__builtin_mips_cheri_cap_offset_increment:
12519 return Builder.CreateCall(
12520 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_offset_increment, SizeTy),
12521 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
12522 case Mips::BI__builtin_mips_cheri_cap_offset_set:
12523 return Builder.CreateCall(
12524 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_offset_set, SizeTy),
12525 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
12526 case Mips::BI__builtin_mips_cheri_cap_offset_get:
12527 return Builder.CreateCall(
12528 CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_offset_get, SizeTy),
12529 {EmitScalarExpr(E->getArg(0))});
12530 }
12531}
12532
12533Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
12534 const CallExpr *E) {
12535 SmallVector<Value*, 4> Ops;
12536
12537 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
12538 Ops.push_back(EmitScalarExpr(E->getArg(i)));
12539
12540 Intrinsic::ID ID = Intrinsic::not_intrinsic;
12541
12542 switch (BuiltinID) {
12543 default: return nullptr;
12544
12545 // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
12546 // call __builtin_readcyclecounter.
12547 case PPC::BI__builtin_ppc_get_timebase:
12548 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
12549
12550 // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
12551 case PPC::BI__builtin_altivec_lvx:
12552 case PPC::BI__builtin_altivec_lvxl:
12553 case PPC::BI__builtin_altivec_lvebx:
12554 case PPC::BI__builtin_altivec_lvehx:
12555 case PPC::BI__builtin_altivec_lvewx:
12556 case PPC::BI__builtin_altivec_lvsl:
12557 case PPC::BI__builtin_altivec_lvsr:
12558 case PPC::BI__builtin_vsx_lxvd2x:
12559 case PPC::BI__builtin_vsx_lxvw4x:
12560 case PPC::BI__builtin_vsx_lxvd2x_be:
12561 case PPC::BI__builtin_vsx_lxvw4x_be:
12562 case PPC::BI__builtin_vsx_lxvl:
12563 case PPC::BI__builtin_vsx_lxvll:
12564 {
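    // lxvl/lxvll take the pointer operand directly; the remaining forms take
    // an offset and a base pointer, which are folded into a single GEP.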
    if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
        BuiltinID == PPC::BI__builtin_vsx_lxvll) {
      Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
    } else {
12569 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
12570 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
12571 Ops.pop_back();
12572 }
12573
12574 switch (BuiltinID) {
12575 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
12576 case PPC::BI__builtin_altivec_lvx:
12577 ID = Intrinsic::ppc_altivec_lvx;
12578 break;
12579 case PPC::BI__builtin_altivec_lvxl:
12580 ID = Intrinsic::ppc_altivec_lvxl;
12581 break;
12582 case PPC::BI__builtin_altivec_lvebx:
12583 ID = Intrinsic::ppc_altivec_lvebx;
12584 break;
12585 case PPC::BI__builtin_altivec_lvehx:
12586 ID = Intrinsic::ppc_altivec_lvehx;
12587 break;
12588 case PPC::BI__builtin_altivec_lvewx:
12589 ID = Intrinsic::ppc_altivec_lvewx;
12590 break;
12591 case PPC::BI__builtin_altivec_lvsl:
12592 ID = Intrinsic::ppc_altivec_lvsl;
12593 break;
12594 case PPC::BI__builtin_altivec_lvsr:
12595 ID = Intrinsic::ppc_altivec_lvsr;
12596 break;
12597 case PPC::BI__builtin_vsx_lxvd2x:
12598 ID = Intrinsic::ppc_vsx_lxvd2x;
12599 break;
12600 case PPC::BI__builtin_vsx_lxvw4x:
12601 ID = Intrinsic::ppc_vsx_lxvw4x;
12602 break;
12603 case PPC::BI__builtin_vsx_lxvd2x_be:
12604 ID = Intrinsic::ppc_vsx_lxvd2x_be;
12605 break;
12606 case PPC::BI__builtin_vsx_lxvw4x_be:
12607 ID = Intrinsic::ppc_vsx_lxvw4x_be;
12608 break;
12609 case PPC::BI__builtin_vsx_lxvl:
12610 ID = Intrinsic::ppc_vsx_lxvl;
12611 break;
12612 case PPC::BI__builtin_vsx_lxvll:
12613 ID = Intrinsic::ppc_vsx_lxvll;
12614 break;
12615 }
12616 llvm::Function *F = CGM.getIntrinsic(ID);
12617 return Builder.CreateCall(F, Ops, "");
12618 }
12619
12620 // vec_st, vec_xst_be
12621 case PPC::BI__builtin_altivec_stvx:
12622 case PPC::BI__builtin_altivec_stvxl:
12623 case PPC::BI__builtin_altivec_stvebx:
12624 case PPC::BI__builtin_altivec_stvehx:
12625 case PPC::BI__builtin_altivec_stvewx:
12626 case PPC::BI__builtin_vsx_stxvd2x:
12627 case PPC::BI__builtin_vsx_stxvw4x:
12628 case PPC::BI__builtin_vsx_stxvd2x_be:
12629 case PPC::BI__builtin_vsx_stxvw4x_be:
12630 case PPC::BI__builtin_vsx_stxvl:
12631 case PPC::BI__builtin_vsx_stxvll:
12632 {
    if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
        BuiltinID == PPC::BI__builtin_vsx_stxvll) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
    } else {
12637 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
12638 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
12639 Ops.pop_back();
12640 }
12641
12642 switch (BuiltinID) {
12643 default: llvm_unreachable("Unsupported st intrinsic!");
12644 case PPC::BI__builtin_altivec_stvx:
12645 ID = Intrinsic::ppc_altivec_stvx;
12646 break;
12647 case PPC::BI__builtin_altivec_stvxl:
12648 ID = Intrinsic::ppc_altivec_stvxl;
12649 break;
12650 case PPC::BI__builtin_altivec_stvebx:
12651 ID = Intrinsic::ppc_altivec_stvebx;
12652 break;
12653 case PPC::BI__builtin_altivec_stvehx:
12654 ID = Intrinsic::ppc_altivec_stvehx;
12655 break;
12656 case PPC::BI__builtin_altivec_stvewx:
12657 ID = Intrinsic::ppc_altivec_stvewx;
12658 break;
12659 case PPC::BI__builtin_vsx_stxvd2x:
12660 ID = Intrinsic::ppc_vsx_stxvd2x;
12661 break;
12662 case PPC::BI__builtin_vsx_stxvw4x:
12663 ID = Intrinsic::ppc_vsx_stxvw4x;
12664 break;
12665 case PPC::BI__builtin_vsx_stxvd2x_be:
12666 ID = Intrinsic::ppc_vsx_stxvd2x_be;
12667 break;
12668 case PPC::BI__builtin_vsx_stxvw4x_be:
12669 ID = Intrinsic::ppc_vsx_stxvw4x_be;
12670 break;
12671 case PPC::BI__builtin_vsx_stxvl:
12672 ID = Intrinsic::ppc_vsx_stxvl;
12673 break;
12674 case PPC::BI__builtin_vsx_stxvll:
12675 ID = Intrinsic::ppc_vsx_stxvll;
12676 break;
12677 }
12678 llvm::Function *F = CGM.getIntrinsic(ID);
12679 return Builder.CreateCall(F, Ops, "");
12680 }
12681 // Square root
12682 case PPC::BI__builtin_vsx_xvsqrtsp:
12683 case PPC::BI__builtin_vsx_xvsqrtdp: {
12684 llvm::Type *ResultType = ConvertType(E->getType());
12685 Value *X = EmitScalarExpr(E->getArg(0));
12686 ID = Intrinsic::sqrt;
12687 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
12688 return Builder.CreateCall(F, X);
12689 }
12690 // Count leading zeros
12691 case PPC::BI__builtin_altivec_vclzb:
12692 case PPC::BI__builtin_altivec_vclzh:
12693 case PPC::BI__builtin_altivec_vclzw:
12694 case PPC::BI__builtin_altivec_vclzd: {
12695 llvm::Type *ResultType = ConvertType(E->getType());
12696 Value *X = EmitScalarExpr(E->getArg(0));
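    // The second operand is the is_zero_undef flag; pass false so that a
    // zero input gives a defined result.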
12697 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
12698 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
12699 return Builder.CreateCall(F, {X, Undef});
12700 }
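  // Count trailing zeros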
12701 case PPC::BI__builtin_altivec_vctzb:
12702 case PPC::BI__builtin_altivec_vctzh:
12703 case PPC::BI__builtin_altivec_vctzw:
12704 case PPC::BI__builtin_altivec_vctzd: {
12705 llvm::Type *ResultType = ConvertType(E->getType());
12706 Value *X = EmitScalarExpr(E->getArg(0));
12707 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
12708 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
12709 return Builder.CreateCall(F, {X, Undef});
12710 }
12711 case PPC::BI__builtin_altivec_vpopcntb:
12712 case PPC::BI__builtin_altivec_vpopcnth:
12713 case PPC::BI__builtin_altivec_vpopcntw:
12714 case PPC::BI__builtin_altivec_vpopcntd: {
12715 llvm::Type *ResultType = ConvertType(E->getType());
12716 Value *X = EmitScalarExpr(E->getArg(0));
12717 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12718 return Builder.CreateCall(F, X);
12719 }
12720 // Copy sign
12721 case PPC::BI__builtin_vsx_xvcpsgnsp:
12722 case PPC::BI__builtin_vsx_xvcpsgndp: {
12723 llvm::Type *ResultType = ConvertType(E->getType());
12724 Value *X = EmitScalarExpr(E->getArg(0));
12725 Value *Y = EmitScalarExpr(E->getArg(1));
12726 ID = Intrinsic::copysign;
12727 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
12728 return Builder.CreateCall(F, {X, Y});
12729 }
12730 // Rounding/truncation
12731 case PPC::BI__builtin_vsx_xvrspip:
12732 case PPC::BI__builtin_vsx_xvrdpip:
12733 case PPC::BI__builtin_vsx_xvrdpim:
12734 case PPC::BI__builtin_vsx_xvrspim:
12735 case PPC::BI__builtin_vsx_xvrdpi:
12736 case PPC::BI__builtin_vsx_xvrspi:
12737 case PPC::BI__builtin_vsx_xvrdpic:
12738 case PPC::BI__builtin_vsx_xvrspic:
12739 case PPC::BI__builtin_vsx_xvrdpiz:
12740 case PPC::BI__builtin_vsx_xvrspiz: {
12741 llvm::Type *ResultType = ConvertType(E->getType());
12742 Value *X = EmitScalarExpr(E->getArg(0));
12743 if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
12744 BuiltinID == PPC::BI__builtin_vsx_xvrspim)
12745 ID = Intrinsic::floor;
12746 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
12747 BuiltinID == PPC::BI__builtin_vsx_xvrspi)
12748 ID = Intrinsic::round;
12749 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
12750 BuiltinID == PPC::BI__builtin_vsx_xvrspic)
12751 ID = Intrinsic::nearbyint;
12752 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
12753 BuiltinID == PPC::BI__builtin_vsx_xvrspip)
12754 ID = Intrinsic::ceil;
12755 else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
12756 BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
12757 ID = Intrinsic::trunc;
12758 llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
12759 return Builder.CreateCall(F, X);
12760 }
12761
12762 // Absolute value
12763 case PPC::BI__builtin_vsx_xvabsdp:
12764 case PPC::BI__builtin_vsx_xvabssp: {
12765 llvm::Type *ResultType = ConvertType(E->getType());
12766 Value *X = EmitScalarExpr(E->getArg(0));
12767 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
12768 return Builder.CreateCall(F, X);
12769 }
12770
12771 // FMA variations
12772 case PPC::BI__builtin_vsx_xvmaddadp:
12773 case PPC::BI__builtin_vsx_xvmaddasp:
12774 case PPC::BI__builtin_vsx_xvnmaddadp:
12775 case PPC::BI__builtin_vsx_xvnmaddasp:
12776 case PPC::BI__builtin_vsx_xvmsubadp:
12777 case PPC::BI__builtin_vsx_xvmsubasp:
12778 case PPC::BI__builtin_vsx_xvnmsubadp:
12779 case PPC::BI__builtin_vsx_xvnmsubasp: {
12780 llvm::Type *ResultType = ConvertType(E->getType());
12781 Value *X = EmitScalarExpr(E->getArg(0));
12782 Value *Y = EmitScalarExpr(E->getArg(1));
12783 Value *Z = EmitScalarExpr(E->getArg(2));
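    // FP negation is expressed as a subtraction from negative zero, hence
    // the zero constant below.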
12784 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
12785 llvm::Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
12786 switch (BuiltinID) {
12787 case PPC::BI__builtin_vsx_xvmaddadp:
12788 case PPC::BI__builtin_vsx_xvmaddasp:
12789 return Builder.CreateCall(F, {X, Y, Z});
12790 case PPC::BI__builtin_vsx_xvnmaddadp:
12791 case PPC::BI__builtin_vsx_xvnmaddasp:
12792 return Builder.CreateFSub(Zero,
12793 Builder.CreateCall(F, {X, Y, Z}), "sub");
12794 case PPC::BI__builtin_vsx_xvmsubadp:
12795 case PPC::BI__builtin_vsx_xvmsubasp:
12796 return Builder.CreateCall(F,
12797 {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
12798 case PPC::BI__builtin_vsx_xvnmsubadp:
12799 case PPC::BI__builtin_vsx_xvnmsubasp:
12800 Value *FsubRes =
12801 Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
12802 return Builder.CreateFSub(Zero, FsubRes, "sub");
12803 }
12804 llvm_unreachable("Unknown FMA operation");
12805 return nullptr; // Suppress no-return warning
12806 }
12807
12808 case PPC::BI__builtin_vsx_insertword: {
12809 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
12810
    // The third argument is a compile-time constant int. It must be clamped
    // to the range [0, 12].
12813 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
12814 assert(ArgCI &&
12815 "Third arg to xxinsertw intrinsic must be constant integer");
12816 const int64_t MaxIndex = 12;
12817 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
12818
    // The builtin semantics don't exactly match the xxinsertw instruction's
    // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
    // word from the first argument and inserts it into the second argument.
    // The instruction extracts the word from its second input register and
    // inserts it into its first input register, so swap the first and second
    // arguments.
12824 std::swap(Ops[0], Ops[1]);
12825
12826 // Need to cast the second argument from a vector of unsigned int to a
12827 // vector of long long.
12828 Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
12829
12830 if (getTarget().isLittleEndian()) {
12831 // Create a shuffle mask of (1, 0)
12832 Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
12833 ConstantInt::get(Int32Ty, 0)
12834 };
12835 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12836
12837 // Reverse the double words in the vector we will extract from.
12838 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
12839 Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ShuffleMask);
12840
12841 // Reverse the index.
12842 Index = MaxIndex - Index;
12843 }
12844
12845 // Intrinsic expects the first arg to be a vector of int.
12846 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
12847 Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
12848 return Builder.CreateCall(F, Ops);
12849 }
12850
12851 case PPC::BI__builtin_vsx_extractuword: {
12852 llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
12853
12854 // Intrinsic expects the first argument to be a vector of doublewords.
12855 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
12856
    // The second argument is a compile-time constant int that needs to be
    // clamped to the range [0, 12].
12859 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
12860 assert(ArgCI &&
12861 "Second Arg to xxextractuw intrinsic must be a constant integer!");
12862 const int64_t MaxIndex = 12;
12863 int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
12864
12865 if (getTarget().isLittleEndian()) {
12866 // Reverse the index.
12867 Index = MaxIndex - Index;
12868 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
12869
12870 // Emit the call, then reverse the double words of the results vector.
12871 Value *Call = Builder.CreateCall(F, Ops);
12872
12873 // Create a shuffle mask of (1, 0)
12874 Constant *ShuffleElts[2] = { ConstantInt::get(Int32Ty, 1),
12875 ConstantInt::get(Int32Ty, 0)
12876 };
12877 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12878
12879 Value *ShuffleCall = Builder.CreateShuffleVector(Call, Call, ShuffleMask);
12880 return ShuffleCall;
12881 } else {
12882 Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
12883 return Builder.CreateCall(F, Ops);
12884 }
12885 }
12886
12887 case PPC::BI__builtin_vsx_xxpermdi: {
12888 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
12889 assert(ArgCI && "Third arg must be constant integer!");
12890
12891 unsigned Index = ArgCI->getZExtValue();
12892 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int64Ty, 2));
12893 Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int64Ty, 2));
12894
    // Account for endianness by treating this as just a shuffle, so we use
    // the same indices for both LE and BE in order to produce expected
    // results in both cases.
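    // Bit 1 of the immediate selects the doubleword taken from the first
    // input and bit 0 selects the doubleword taken from the second.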
12898 unsigned ElemIdx0 = (Index & 2) >> 1;
12899 unsigned ElemIdx1 = 2 + (Index & 1);
12900
12901 Constant *ShuffleElts[2] = {ConstantInt::get(Int32Ty, ElemIdx0),
12902 ConstantInt::get(Int32Ty, ElemIdx1)};
12903 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12904
12905 Value *ShuffleCall =
12906 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
12907 QualType BIRetType = E->getType();
12908 auto RetTy = ConvertType(BIRetType);
12909 return Builder.CreateBitCast(ShuffleCall, RetTy);
12910 }
12911
12912 case PPC::BI__builtin_vsx_xxsldwi: {
12913 ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
12914 assert(ArgCI && "Third argument must be a compile time constant");
12915 unsigned Index = ArgCI->getZExtValue() & 0x3;
12916 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::VectorType::get(Int32Ty, 4));
12917 Ops[1] = Builder.CreateBitCast(Ops[1], llvm::VectorType::get(Int32Ty, 4));
12918
12919 // Create a shuffle mask
12920 unsigned ElemIdx0;
12921 unsigned ElemIdx1;
12922 unsigned ElemIdx2;
12923 unsigned ElemIdx3;
12924 if (getTarget().isLittleEndian()) {
12925 // Little endian element N comes from element 8+N-Index of the
12926 // concatenated wide vector (of course, using modulo arithmetic on
12927 // the total number of elements).
12928 ElemIdx0 = (8 - Index) % 8;
12929 ElemIdx1 = (9 - Index) % 8;
12930 ElemIdx2 = (10 - Index) % 8;
12931 ElemIdx3 = (11 - Index) % 8;
12932 } else {
12933 // Big endian ElemIdx<N> = Index + N
12934 ElemIdx0 = Index;
12935 ElemIdx1 = Index + 1;
12936 ElemIdx2 = Index + 2;
12937 ElemIdx3 = Index + 3;
12938 }
12939
12940 Constant *ShuffleElts[4] = {ConstantInt::get(Int32Ty, ElemIdx0),
12941 ConstantInt::get(Int32Ty, ElemIdx1),
12942 ConstantInt::get(Int32Ty, ElemIdx2),
12943 ConstantInt::get(Int32Ty, ElemIdx3)};
12944
12945 Constant *ShuffleMask = llvm::ConstantVector::get(ShuffleElts);
12946 Value *ShuffleCall =
12947 Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleMask);
12948 QualType BIRetType = E->getType();
12949 auto RetTy = ConvertType(BIRetType);
12950 return Builder.CreateBitCast(ShuffleCall, RetTy);
12951 }
12952
12953 case PPC::BI__builtin_pack_vector_int128: {
12954 bool isLittleEndian = getTarget().isLittleEndian();
12955 Value *UndefValue =
12956 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), 2));
12957 Value *Res = Builder.CreateInsertElement(
12958 UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
12959 Res = Builder.CreateInsertElement(Res, Ops[1],
12960 (uint64_t)(isLittleEndian ? 0 : 1));
12961 return Builder.CreateBitCast(Res, ConvertType(E->getType()));
12962 }
12963
12964 case PPC::BI__builtin_unpack_vector_int128: {
12965 ConstantInt *Index = cast<ConstantInt>(Ops[1]);
12966 Value *Unpacked = Builder.CreateBitCast(
12967 Ops[0], llvm::VectorType::get(ConvertType(E->getType()), 2));
12968
12969 if (getTarget().isLittleEndian())
12970 Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
12971
12972 return Builder.CreateExtractElement(Unpacked, Index);
12973 }
12974 }
12975}
12976
12977Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
12978 const CallExpr *E) {
12979 switch (BuiltinID) {
12980 case AMDGPU::BI__builtin_amdgcn_div_scale:
12981 case AMDGPU::BI__builtin_amdgcn_div_scalef: {
    // Translate from the intrinsic's struct return to the builtin's out
    // argument.
12984
12985 Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
12986
12987 llvm::Value *X = EmitScalarExpr(E->getArg(0));
12988 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
12989 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
12990
12991 llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
12992 X->getType());
12993
12994 llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
12995
12996 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
12997 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
12998
12999 llvm::Type *RealFlagType
13000 = FlagOutPtr.getPointer()->getType()->getPointerElementType();
13001
13002 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
13003 Builder.CreateStore(FlagExt, FlagOutPtr);
13004 return Result;
13005 }
13006 case AMDGPU::BI__builtin_amdgcn_div_fmas:
13007 case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
13008 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
13009 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
13010 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
13011 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
13012
13013 llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
13014 Src0->getType());
13015 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
13016 return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
13017 }
13018
13019 case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
13020 return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
13021 case AMDGPU::BI__builtin_amdgcn_mov_dpp:
13022 case AMDGPU::BI__builtin_amdgcn_update_dpp: {
13023 llvm::SmallVector<llvm::Value *, 6> Args;
13024 for (unsigned I = 0; I != E->getNumArgs(); ++I)
13025 Args.push_back(EmitScalarExpr(E->getArg(I)));
13026 assert(Args.size() == 5 || Args.size() == 6);
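    // __builtin_amdgcn_mov_dpp has no "old" operand; supply an undef so both
    // builtins can share the update_dpp intrinsic.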
13027 if (Args.size() == 5)
13028 Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
13029 Function *F =
13030 CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
13031 return Builder.CreateCall(F, Args);
13032 }
13033 case AMDGPU::BI__builtin_amdgcn_div_fixup:
13034 case AMDGPU::BI__builtin_amdgcn_div_fixupf:
13035 case AMDGPU::BI__builtin_amdgcn_div_fixuph:
13036 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
13037 case AMDGPU::BI__builtin_amdgcn_trig_preop:
13038 case AMDGPU::BI__builtin_amdgcn_trig_preopf:
13039 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
13040 case AMDGPU::BI__builtin_amdgcn_rcp:
13041 case AMDGPU::BI__builtin_amdgcn_rcpf:
13042 case AMDGPU::BI__builtin_amdgcn_rcph:
13043 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
13044 case AMDGPU::BI__builtin_amdgcn_rsq:
13045 case AMDGPU::BI__builtin_amdgcn_rsqf:
13046 case AMDGPU::BI__builtin_amdgcn_rsqh:
13047 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
13048 case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
13049 case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
13050 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
13051 case AMDGPU::BI__builtin_amdgcn_sinf:
13052 case AMDGPU::BI__builtin_amdgcn_sinh:
13053 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
13054 case AMDGPU::BI__builtin_amdgcn_cosf:
13055 case AMDGPU::BI__builtin_amdgcn_cosh:
13056 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
13057 case AMDGPU::BI__builtin_amdgcn_log_clampf:
13058 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
13059 case AMDGPU::BI__builtin_amdgcn_ldexp:
13060 case AMDGPU::BI__builtin_amdgcn_ldexpf:
13061 case AMDGPU::BI__builtin_amdgcn_ldexph:
13062 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
13063 case AMDGPU::BI__builtin_amdgcn_frexp_mant:
13064 case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
13065 case AMDGPU::BI__builtin_amdgcn_frexp_manth:
13066 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
13067 case AMDGPU::BI__builtin_amdgcn_frexp_exp:
13068 case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
13069 Value *Src0 = EmitScalarExpr(E->getArg(0));
13070 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
13071 { Builder.getInt32Ty(), Src0->getType() });
13072 return Builder.CreateCall(F, Src0);
13073 }
13074 case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
13075 Value *Src0 = EmitScalarExpr(E->getArg(0));
13076 Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
13077 { Builder.getInt16Ty(), Src0->getType() });
13078 return Builder.CreateCall(F, Src0);
13079 }
13080 case AMDGPU::BI__builtin_amdgcn_fract:
13081 case AMDGPU::BI__builtin_amdgcn_fractf:
13082 case AMDGPU::BI__builtin_amdgcn_fracth:
13083 return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
13084 case AMDGPU::BI__builtin_amdgcn_lerp:
13085 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
13086 case AMDGPU::BI__builtin_amdgcn_uicmp:
13087 case AMDGPU::BI__builtin_amdgcn_uicmpl:
13088 case AMDGPU::BI__builtin_amdgcn_sicmp:
13089 case AMDGPU::BI__builtin_amdgcn_sicmpl:
13090 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_icmp);
13091 case AMDGPU::BI__builtin_amdgcn_fcmp:
13092 case AMDGPU::BI__builtin_amdgcn_fcmpf:
13093 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fcmp);
13094 case AMDGPU::BI__builtin_amdgcn_class:
13095 case AMDGPU::BI__builtin_amdgcn_classf:
13096 case AMDGPU::BI__builtin_amdgcn_classh:
13097 return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
13098 case AMDGPU::BI__builtin_amdgcn_fmed3f:
13099 case AMDGPU::BI__builtin_amdgcn_fmed3h:
13100 return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
13101 case AMDGPU::BI__builtin_amdgcn_ds_append:
13102 case AMDGPU::BI__builtin_amdgcn_ds_consume: {
13103 Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
13104 Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
13105 Value *Src0 = EmitScalarExpr(E->getArg(0));
13106 Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
13107 return Builder.CreateCall(F, { Src0, Builder.getFalse() });
13108 }
13109 case AMDGPU::BI__builtin_amdgcn_read_exec: {
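    // Reading exec depends on the set of active lanes, so the call is marked
    // convergent to keep it from being moved across control flow.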
13110 CallInst *CI = cast<CallInst>(
13111 EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
13112 CI->setConvergent();
13113 return CI;
13114 }
13115 case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
13116 case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
13117 StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
13118 "exec_lo" : "exec_hi";
13119 CallInst *CI = cast<CallInst>(
13120 EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, true, RegName));
13121 CI->setConvergent();
13122 return CI;
13123 }
13124 // amdgcn workitem
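  // These are emitted with !range metadata bounding the result to [0, 1024).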
13125 case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
13126 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
13127 case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
13128 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
13129 case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
13130 return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
13131
13132 // r600 intrinsics
13133 case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
13134 case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
13135 return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
13136 case AMDGPU::BI__builtin_r600_read_tidig_x:
13137 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
13138 case AMDGPU::BI__builtin_r600_read_tidig_y:
13139 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
13140 case AMDGPU::BI__builtin_r600_read_tidig_z:
13141 return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
13142 default:
13143 return nullptr;
13144 }
13145}
13146
13147/// Handle a SystemZ function in which the final argument is a pointer
13148/// to an int that receives the post-instruction CC value. At the LLVM level
13149/// this is represented as a function that returns a {result, cc} pair.
13150static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
13151 unsigned IntrinsicID,
13152 const CallExpr *E) {
13153 unsigned NumArgs = E->getNumArgs() - 1;
13154 SmallVector<Value *, 8> Args(NumArgs);
13155 for (unsigned I = 0; I < NumArgs; ++I)
13156 Args[I] = CGF.EmitScalarExpr(E->getArg(I));
13157 Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
13158 Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
13159 Value *Call = CGF.Builder.CreateCall(F, Args);
13160 Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
13161 CGF.Builder.CreateStore(CC, CCPtr);
13162 return CGF.Builder.CreateExtractValue(Call, 0);
13163}
13164
13165Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
13166 const CallExpr *E) {
13167 switch (BuiltinID) {
13168 case SystemZ::BI__builtin_tbegin: {
13169 Value *TDB = EmitScalarExpr(E->getArg(0));
13170 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
13171 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
13172 return Builder.CreateCall(F, {TDB, Control});
13173 }
13174 case SystemZ::BI__builtin_tbegin_nofloat: {
13175 Value *TDB = EmitScalarExpr(E->getArg(0));
13176 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
13177 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
13178 return Builder.CreateCall(F, {TDB, Control});
13179 }
13180 case SystemZ::BI__builtin_tbeginc: {
13181 Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
13182 Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
13183 Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
13184 return Builder.CreateCall(F, {TDB, Control});
13185 }
13186 case SystemZ::BI__builtin_tabort: {
13187 Value *Data = EmitScalarExpr(E->getArg(0));
13188 Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
13189 return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
13190 }
13191 case SystemZ::BI__builtin_non_tx_store: {
13192 Value *Address = EmitScalarExpr(E->getArg(0));
13193 Value *Data = EmitScalarExpr(E->getArg(1));
13194 Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
13195 return Builder.CreateCall(F, {Data, Address});
13196 }
13197
  // Vector builtins. Note that most vector builtins are mapped automatically
  // to target-specific LLVM intrinsics. The ones handled specially here can
  // be represented via standard LLVM IR, which is preferable because it
  // enables common LLVM optimizations.
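  //
  // A minimal sketch of the mappings below:
  //   __builtin_s390_vpopctb(X) -> @llvm.ctpop.v16i8(<16 x i8> %X)
  //   __builtin_s390_vclzf(X)   -> @llvm.ctlz.v4i32(<4 x i32> %X, i1 false)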
13202
13203 case SystemZ::BI__builtin_s390_vpopctb:
13204 case SystemZ::BI__builtin_s390_vpopcth:
13205 case SystemZ::BI__builtin_s390_vpopctf:
13206 case SystemZ::BI__builtin_s390_vpopctg: {
13207 llvm::Type *ResultType = ConvertType(E->getType());
13208 Value *X = EmitScalarExpr(E->getArg(0));
13209 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
13210 return Builder.CreateCall(F, X);
13211 }
13212
13213 case SystemZ::BI__builtin_s390_vclzb:
13214 case SystemZ::BI__builtin_s390_vclzh:
13215 case SystemZ::BI__builtin_s390_vclzf:
13216 case SystemZ::BI__builtin_s390_vclzg: {
13217 llvm::Type *ResultType = ConvertType(E->getType());
13218 Value *X = EmitScalarExpr(E->getArg(0));
13219 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
13220 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
13221 return Builder.CreateCall(F, {X, Undef});
13222 }
13223
13224 case SystemZ::BI__builtin_s390_vctzb:
13225 case SystemZ::BI__builtin_s390_vctzh:
13226 case SystemZ::BI__builtin_s390_vctzf:
13227 case SystemZ::BI__builtin_s390_vctzg: {
13228 llvm::Type *ResultType = ConvertType(E->getType());
13229 Value *X = EmitScalarExpr(E->getArg(0));
13230 Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
13231 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
13232 return Builder.CreateCall(F, {X, Undef});
13233 }
13234
13235 case SystemZ::BI__builtin_s390_vfsqsb:
13236 case SystemZ::BI__builtin_s390_vfsqdb: {
13237 llvm::Type *ResultType = ConvertType(E->getType());
13238 Value *X = EmitScalarExpr(E->getArg(0));
13239 Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
13240 return Builder.CreateCall(F, X);
13241 }
13242 case SystemZ::BI__builtin_s390_vfmasb:
13243 case SystemZ::BI__builtin_s390_vfmadb: {
13244 llvm::Type *ResultType = ConvertType(E->getType());
13245 Value *X = EmitScalarExpr(E->getArg(0));
13246 Value *Y = EmitScalarExpr(E->getArg(1));
13247 Value *Z = EmitScalarExpr(E->getArg(2));
13248 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13249 return Builder.CreateCall(F, {X, Y, Z});
13250 }
13251 case SystemZ::BI__builtin_s390_vfmssb:
13252 case SystemZ::BI__builtin_s390_vfmsdb: {
13253 llvm::Type *ResultType = ConvertType(E->getType());
13254 Value *X = EmitScalarExpr(E->getArg(0));
13255 Value *Y = EmitScalarExpr(E->getArg(1));
13256 Value *Z = EmitScalarExpr(E->getArg(2));
13257 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
13258 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13259 return Builder.CreateCall(F, {X, Y, Builder.CreateFSub(Zero, Z, "sub")});
13260 }
13261 case SystemZ::BI__builtin_s390_vfnmasb:
13262 case SystemZ::BI__builtin_s390_vfnmadb: {
13263 llvm::Type *ResultType = ConvertType(E->getType());
13264 Value *X = EmitScalarExpr(E->getArg(0));
13265 Value *Y = EmitScalarExpr(E->getArg(1));
13266 Value *Z = EmitScalarExpr(E->getArg(2));
13267 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
13268 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13269 return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, Z}), "sub");
13270 }
13271 case SystemZ::BI__builtin_s390_vfnmssb:
13272 case SystemZ::BI__builtin_s390_vfnmsdb: {
13273 llvm::Type *ResultType = ConvertType(E->getType());
13274 Value *X = EmitScalarExpr(E->getArg(0));
13275 Value *Y = EmitScalarExpr(E->getArg(1));
13276 Value *Z = EmitScalarExpr(E->getArg(2));
13277 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
13278 Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
13279 Value *NegZ = Builder.CreateFSub(Zero, Z, "sub");
13280 return Builder.CreateFSub(Zero, Builder.CreateCall(F, {X, Y, NegZ}));
13281 }
13282 case SystemZ::BI__builtin_s390_vflpsb:
13283 case SystemZ::BI__builtin_s390_vflpdb: {
13284 llvm::Type *ResultType = ConvertType(E->getType());
13285 Value *X = EmitScalarExpr(E->getArg(0));
13286 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
13287 return Builder.CreateCall(F, X);
13288 }
13289 case SystemZ::BI__builtin_s390_vflnsb:
13290 case SystemZ::BI__builtin_s390_vflndb: {
13291 llvm::Type *ResultType = ConvertType(E->getType());
13292 Value *X = EmitScalarExpr(E->getArg(0));
13293 Value *Zero = llvm::ConstantFP::getZeroValueForNegation(ResultType);
13294 Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
13295 return Builder.CreateFSub(Zero, Builder.CreateCall(F, X), "sub");
13296 }
13297 case SystemZ::BI__builtin_s390_vfisb:
13298 case SystemZ::BI__builtin_s390_vfidb: {
13299 llvm::Type *ResultType = ConvertType(E->getType());
13300 Value *X = EmitScalarExpr(E->getArg(0));
13301 // Constant-fold the M4 and M5 mask arguments.
13302 llvm::APSInt M4, M5;
13303 bool IsConstM4 = E->getArg(1)->isIntegerConstantExpr(M4, getContext());
13304 bool IsConstM5 = E->getArg(2)->isIntegerConstantExpr(M5, getContext());
13305 assert(IsConstM4 && IsConstM5 && "Constant arg isn't actually constant?");
13306 (void)IsConstM4; (void)IsConstM5;
    // Check whether this instance can be represented via a standard LLVM
    // intrinsic. We only support some combinations of M4 and M5.
13309 Intrinsic::ID ID = Intrinsic::not_intrinsic;
13310 switch (M4.getZExtValue()) {
13311 default: break;
13312 case 0: // IEEE-inexact exception allowed
13313 switch (M5.getZExtValue()) {
13314 default: break;
13315 case 0: ID = Intrinsic::rint; break;
13316 }
13317 break;
13318 case 4: // IEEE-inexact exception suppressed
13319 switch (M5.getZExtValue()) {
13320 default: break;
13321 case 0: ID = Intrinsic::nearbyint; break;
13322 case 1: ID = Intrinsic::round; break;
13323 case 5: ID = Intrinsic::trunc; break;
13324 case 6: ID = Intrinsic::ceil; break;
13325 case 7: ID = Intrinsic::floor; break;
13326 }
13327 break;
13328 }
13329 if (ID != Intrinsic::not_intrinsic) {
13330 Function *F = CGM.getIntrinsic(ID, ResultType);
13331 return Builder.CreateCall(F, X);
13332 }
13333 switch (BuiltinID) {
13334 case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
13335 case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
13336 default: llvm_unreachable("Unknown BuiltinID");
13337 }
13338 Function *F = CGM.getIntrinsic(ID);
13339 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
13340 Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
13341 return Builder.CreateCall(F, {X, M4Value, M5Value});
13342 }
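  // For example, a sketch of the M4/M5 mapping above:
  //   __builtin_s390_vfidb(X, 4, 7) -> @llvm.floor.v2f64(<2 x double> %X)
  // (round toward -inf with inexact suppressed), while an unsupported pair
  // such as (4, 3) falls back to the target-specific @llvm.s390.vfidb.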
13343 case SystemZ::BI__builtin_s390_vfmaxsb:
13344 case SystemZ::BI__builtin_s390_vfmaxdb: {
13345 llvm::Type *ResultType = ConvertType(E->getType());
13346 Value *X = EmitScalarExpr(E->getArg(0));
13347 Value *Y = EmitScalarExpr(E->getArg(1));
13348 // Constant-fold the M4 mask argument.
13349 llvm::APSInt M4;
13350 bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
13351 assert(IsConstM4 && "Constant arg isn't actually constant?");
13352 (void)IsConstM4;
    // Check whether this instance can be represented via a standard LLVM
    // intrinsic. We only support some values of M4.
13355 Intrinsic::ID ID = Intrinsic::not_intrinsic;
13356 switch (M4.getZExtValue()) {
13357 default: break;
13358 case 4: ID = Intrinsic::maxnum; break;
13359 }
13360 if (ID != Intrinsic::not_intrinsic) {
13361 Function *F = CGM.getIntrinsic(ID, ResultType);
13362 return Builder.CreateCall(F, {X, Y});
13363 }
13364 switch (BuiltinID) {
13365 case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
13366 case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
13367 default: llvm_unreachable("Unknown BuiltinID");
13368 }
13369 Function *F = CGM.getIntrinsic(ID);
13370 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
13371 return Builder.CreateCall(F, {X, Y, M4Value});
13372 }
13373 case SystemZ::BI__builtin_s390_vfminsb:
13374 case SystemZ::BI__builtin_s390_vfmindb: {
13375 llvm::Type *ResultType = ConvertType(E->getType());
13376 Value *X = EmitScalarExpr(E->getArg(0));
13377 Value *Y = EmitScalarExpr(E->getArg(1));
13378 // Constant-fold the M4 mask argument.
13379 llvm::APSInt M4;
13380 bool IsConstM4 = E->getArg(2)->isIntegerConstantExpr(M4, getContext());
13381 assert(IsConstM4 && "Constant arg isn't actually constant?");
13382 (void)IsConstM4;
    // Check whether this instance can be represented via a standard LLVM
    // intrinsic. We only support some values of M4.
13385 Intrinsic::ID ID = Intrinsic::not_intrinsic;
13386 switch (M4.getZExtValue()) {
13387 default: break;
13388 case 4: ID = Intrinsic::minnum; break;
13389 }
13390 if (ID != Intrinsic::not_intrinsic) {
13391 Function *F = CGM.getIntrinsic(ID, ResultType);
13392 return Builder.CreateCall(F, {X, Y});
13393 }
13394 switch (BuiltinID) {
13395 case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
13396 case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
13397 default: llvm_unreachable("Unknown BuiltinID");
13398 }
13399 Function *F = CGM.getIntrinsic(ID);
13400 Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
13401 return Builder.CreateCall(F, {X, Y, M4Value});
13402 }
13403
13404 // Vector intrinsics that output the post-instruction CC value.
13405
13406#define INTRINSIC_WITH_CC(NAME) \
13407 case SystemZ::BI__builtin_##NAME: \
13408 return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
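// For instance, INTRINSIC_WITH_CC(s390_vpkshs) expands to
//   case SystemZ::BI__builtin_s390_vpkshs:
//     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);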
13409
13410 INTRINSIC_WITH_CC(s390_vpkshs);
13411 INTRINSIC_WITH_CC(s390_vpksfs);
13412 INTRINSIC_WITH_CC(s390_vpksgs);
13413
13414 INTRINSIC_WITH_CC(s390_vpklshs);
13415 INTRINSIC_WITH_CC(s390_vpklsfs);
13416 INTRINSIC_WITH_CC(s390_vpklsgs);
13417
13418 INTRINSIC_WITH_CC(s390_vceqbs);
13419 INTRINSIC_WITH_CC(s390_vceqhs);
13420 INTRINSIC_WITH_CC(s390_vceqfs);
13421 INTRINSIC_WITH_CC(s390_vceqgs);
13422
13423 INTRINSIC_WITH_CC(s390_vchbs);
13424 INTRINSIC_WITH_CC(s390_vchhs);
13425 INTRINSIC_WITH_CC(s390_vchfs);
13426 INTRINSIC_WITH_CC(s390_vchgs);
13427
13428 INTRINSIC_WITH_CC(s390_vchlbs);
13429 INTRINSIC_WITH_CC(s390_vchlhs);
13430 INTRINSIC_WITH_CC(s390_vchlfs);
13431 INTRINSIC_WITH_CC(s390_vchlgs);
13432
13433 INTRINSIC_WITH_CC(s390_vfaebs);
13434 INTRINSIC_WITH_CC(s390_vfaehs);
13435 INTRINSIC_WITH_CC(s390_vfaefs);
13436
13437 INTRINSIC_WITH_CC(s390_vfaezbs);
13438 INTRINSIC_WITH_CC(s390_vfaezhs);
13439 INTRINSIC_WITH_CC(s390_vfaezfs);
13440
13441 INTRINSIC_WITH_CC(s390_vfeebs);
13442 INTRINSIC_WITH_CC(s390_vfeehs);
13443 INTRINSIC_WITH_CC(s390_vfeefs);
13444
13445 INTRINSIC_WITH_CC(s390_vfeezbs);
13446 INTRINSIC_WITH_CC(s390_vfeezhs);
13447 INTRINSIC_WITH_CC(s390_vfeezfs);
13448
13449 INTRINSIC_WITH_CC(s390_vfenebs);
13450 INTRINSIC_WITH_CC(s390_vfenehs);
13451 INTRINSIC_WITH_CC(s390_vfenefs);
13452
13453 INTRINSIC_WITH_CC(s390_vfenezbs);
13454 INTRINSIC_WITH_CC(s390_vfenezhs);
13455 INTRINSIC_WITH_CC(s390_vfenezfs);
13456
13457 INTRINSIC_WITH_CC(s390_vistrbs);
13458 INTRINSIC_WITH_CC(s390_vistrhs);
13459 INTRINSIC_WITH_CC(s390_vistrfs);
13460
13461 INTRINSIC_WITH_CC(s390_vstrcbs);
13462 INTRINSIC_WITH_CC(s390_vstrchs);
13463 INTRINSIC_WITH_CC(s390_vstrcfs);
13464
13465 INTRINSIC_WITH_CC(s390_vstrczbs);
13466 INTRINSIC_WITH_CC(s390_vstrczhs);
13467 INTRINSIC_WITH_CC(s390_vstrczfs);
13468
13469 INTRINSIC_WITH_CC(s390_vfcesbs);
13470 INTRINSIC_WITH_CC(s390_vfcedbs);
13471 INTRINSIC_WITH_CC(s390_vfchsbs);
13472 INTRINSIC_WITH_CC(s390_vfchdbs);
13473 INTRINSIC_WITH_CC(s390_vfchesbs);
13474 INTRINSIC_WITH_CC(s390_vfchedbs);
13475
13476 INTRINSIC_WITH_CC(s390_vftcisb);
13477 INTRINSIC_WITH_CC(s390_vftcidb);
13478
13479#undef INTRINSIC_WITH_CC
13480
13481 default:
13482 return nullptr;
13483 }
13484}
13485
13486namespace {
// Helper classes for mapping MMA builtins to the corresponding LLVM intrinsic
// variants.
13488struct NVPTXMmaLdstInfo {
13489 unsigned NumResults; // Number of elements to load/store
  // Intrinsic IDs for the row/col variants; 0 if the particular layout is
  // unsupported.
13491 unsigned IID_col;
13492 unsigned IID_row;
13493};
13494
13495#define MMA_INTR(geom_op_type, layout) \
13496 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
13497#define MMA_LDST(n, geom_op_type) \
13498 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
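// For instance, MMA_LDST(8, m16n16k16_load_a_f16) expands to
//   {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//       Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}
// i.e. eight result elements plus the col/row intrinsic pair.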
13499
13500static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
13501 switch (BuiltinID) {
13502 // FP MMA loads
13503 case NVPTX::BI__hmma_m16n16k16_ld_a:
13504 return MMA_LDST(8, m16n16k16_load_a_f16);
13505 case NVPTX::BI__hmma_m16n16k16_ld_b:
13506 return MMA_LDST(8, m16n16k16_load_b_f16);
13507 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
13508 return MMA_LDST(4, m16n16k16_load_c_f16);
13509 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
13510 return MMA_LDST(8, m16n16k16_load_c_f32);
13511 case NVPTX::BI__hmma_m32n8k16_ld_a:
13512 return MMA_LDST(8, m32n8k16_load_a_f16);
13513 case NVPTX::BI__hmma_m32n8k16_ld_b:
13514 return MMA_LDST(8, m32n8k16_load_b_f16);
13515 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
13516 return MMA_LDST(4, m32n8k16_load_c_f16);
13517 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
13518 return MMA_LDST(8, m32n8k16_load_c_f32);
13519 case NVPTX::BI__hmma_m8n32k16_ld_a:
13520 return MMA_LDST(8, m8n32k16_load_a_f16);
13521 case NVPTX::BI__hmma_m8n32k16_ld_b:
13522 return MMA_LDST(8, m8n32k16_load_b_f16);
13523 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
13524 return MMA_LDST(4, m8n32k16_load_c_f16);
13525 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
13526 return MMA_LDST(8, m8n32k16_load_c_f32);
13527
13528 // Integer MMA loads
13529 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
13530 return MMA_LDST(2, m16n16k16_load_a_s8);
13531 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
13532 return MMA_LDST(2, m16n16k16_load_a_u8);
13533 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
13534 return MMA_LDST(2, m16n16k16_load_b_s8);
13535 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
13536 return MMA_LDST(2, m16n16k16_load_b_u8);
13537 case NVPTX::BI__imma_m16n16k16_ld_c:
13538 return MMA_LDST(8, m16n16k16_load_c_s32);
13539 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
13540 return MMA_LDST(4, m32n8k16_load_a_s8);
13541 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
13542 return MMA_LDST(4, m32n8k16_load_a_u8);
13543 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
13544 return MMA_LDST(1, m32n8k16_load_b_s8);
13545 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
13546 return MMA_LDST(1, m32n8k16_load_b_u8);
13547 case NVPTX::BI__imma_m32n8k16_ld_c:
13548 return MMA_LDST(8, m32n8k16_load_c_s32);
13549 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
13550 return MMA_LDST(1, m8n32k16_load_a_s8);
13551 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
13552 return MMA_LDST(1, m8n32k16_load_a_u8);
13553 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
13554 return MMA_LDST(4, m8n32k16_load_b_s8);
13555 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
13556 return MMA_LDST(4, m8n32k16_load_b_u8);
13557 case NVPTX::BI__imma_m8n32k16_ld_c:
13558 return MMA_LDST(8, m8n32k16_load_c_s32);
13559
  // Sub-integer MMA loads.
  // A fragments support only row layout; B fragments support only col layout.
13562 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
13563 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
13564 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
13565 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
13566 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
13567 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
13568 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
13569 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
13570 case NVPTX::BI__imma_m8n8k32_ld_c:
13571 return MMA_LDST(2, m8n8k32_load_c_s32);
13572 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
13573 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
13574 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
13575 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
13576 case NVPTX::BI__bmma_m8n8k128_ld_c:
13577 return MMA_LDST(2, m8n8k128_load_c_s32);
13578
  // NOTE: We need to follow the inconsistent naming scheme used by NVCC.
  // Unlike PTX and LLVM IR, where stores always use fragment D, NVCC builtins
  // always use fragment C for both loads and stores.
13582 // FP MMA stores.
13583 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
13584 return MMA_LDST(4, m16n16k16_store_d_f16);
13585 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
13586 return MMA_LDST(8, m16n16k16_store_d_f32);
13587 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
13588 return MMA_LDST(4, m32n8k16_store_d_f16);
13589 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
13590 return MMA_LDST(8, m32n8k16_store_d_f32);
13591 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
13592 return MMA_LDST(4, m8n32k16_store_d_f16);
13593 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
13594 return MMA_LDST(8, m8n32k16_store_d_f32);
13595
13596 // Integer and sub-integer MMA stores.
13597 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
13598 // name, integer loads/stores use LLVM's i32.
13599 case NVPTX::BI__imma_m16n16k16_st_c_i32:
13600 return MMA_LDST(8, m16n16k16_store_d_s32);
13601 case NVPTX::BI__imma_m32n8k16_st_c_i32:
13602 return MMA_LDST(8, m32n8k16_store_d_s32);
13603 case NVPTX::BI__imma_m8n32k16_st_c_i32:
13604 return MMA_LDST(8, m8n32k16_store_d_s32);
13605 case NVPTX::BI__imma_m8n8k32_st_c_i32:
13606 return MMA_LDST(2, m8n8k32_store_d_s32);
13607 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
13608 return MMA_LDST(2, m8n8k128_store_d_s32);
13609
13610 default:
13611 llvm_unreachable("Unknown MMA builtin");
13612 }
13613}
13614#undef MMA_LDST
13615#undef MMA_INTR
13616
13617
13618struct NVPTXMmaInfo {
13619 unsigned NumEltsA;
13620 unsigned NumEltsB;
13621 unsigned NumEltsC;
13622 unsigned NumEltsD;
13623 std::array<unsigned, 8> Variants;
13624
13625 unsigned getMMAIntrinsic(int Layout, bool Satf) {
13626 unsigned Index = Layout * 2 + Satf;
13627 if (Index >= Variants.size())
13628 return 0;
13629 return Variants[Index];
13630 }
13631};
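// A sketch of the Variants indexing above: Layout encodes the A/B layouts as
// 0=row_row, 1=row_col, 2=col_row, 3=col_col, so e.g. getMMAIntrinsic(1, true)
// selects index 3, the row_col satfinite variant (see MMA_VARIANTS below).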
13632
// Returns the NVPTXMmaInfo for BuiltinID; its getMMAIntrinsic() yields the
// intrinsic matching Layout and Satf for valid combinations, and 0 otherwise.
13635static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
13636 // clang-format off
13637#define MMA_VARIANTS(geom, type) {{ \
13638 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
13639 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
13640 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
13641 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
13642 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
13643 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
13644 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
13645 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
13646 }}
13647// Sub-integer MMA only supports row.col layout.
13648#define MMA_VARIANTS_I4(geom, type) {{ \
13649 0, \
13650 0, \
13651 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
13652 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
13653 0, \
13654 0, \
13655 0, \
13656 0 \
13657 }}
13658// b1 MMA does not support .satfinite.
13659#define MMA_VARIANTS_B1(geom, type) {{ \
13660 0, \
13661 0, \
13662 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
13663 0, \
13664 0, \
13665 0, \
13666 0, \
13667 0 \
13668 }}
13669 // clang-format on
13670 switch (BuiltinID) {
13671 // FP MMA
  // Note that the 'type' argument of MMA_VARIANTS uses D_C notation, while
  // the NumEltsN fields of the return value are ordered as A,B,C,D.
13674 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
13675 return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
13676 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
13677 return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
13678 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
13679 return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
13680 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
13681 return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
13682 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
13683 return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
13684 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
13685 return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
13686 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
13687 return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
13688 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
13689 return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
13690 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
13691 return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
13692 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
13693 return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
13694 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
13695 return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
13696 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
13697 return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
13698
13699 // Integer MMA
13700 case NVPTX::BI__imma_m16n16k16_mma_s8:
13701 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
13702 case NVPTX::BI__imma_m16n16k16_mma_u8:
13703 return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
13704 case NVPTX::BI__imma_m32n8k16_mma_s8:
13705 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
13706 case NVPTX::BI__imma_m32n8k16_mma_u8:
13707 return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
13708 case NVPTX::BI__imma_m8n32k16_mma_s8:
13709 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
13710 case NVPTX::BI__imma_m8n32k16_mma_u8:
13711 return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
13712
13713 // Sub-integer MMA
13714 case NVPTX::BI__imma_m8n8k32_mma_s4:
13715 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
13716 case NVPTX::BI__imma_m8n8k32_mma_u4:
13717 return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
13718 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
13719 return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
13720 default:
13721 llvm_unreachable("Unexpected builtin ID.");
13722 }
13723#undef MMA_VARIANTS
13724#undef MMA_VARIANTS_I4
13725#undef MMA_VARIANTS_B1
13726}
13727
13728} // namespace
13729
13730Value *
13731CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
13732 auto MakeLdg = [&](unsigned IntrinsicID) {
13733 Value *Ptr = EmitScalarExpr(E->getArg(0));
13734 clang::CharUnits Align =
13735 getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
13736 return Builder.CreateCall(
13737 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
13738 Ptr->getType()}),
13739 {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
13740 };
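  // E.g. (a sketch; the exact mangled intrinsic name is illustrative)
  //   __nvvm_ldg_i(p)
  // becomes
  //   call i32 @llvm.nvvm.ldg.global.i.i32.p0i32(i32* %p, i32 4)
  // with the trailing operand carrying the natural alignment of the pointee.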
13741 auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
13742 Value *Ptr = EmitScalarExpr(E->getArg(0));
13743 return Builder.CreateCall(
13744 CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
13745 Ptr->getType()}),
13746 {Ptr, EmitScalarExpr(E->getArg(1))});
13747 };
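  // E.g. (a sketch; the mangled suffix is illustrative)
  //   __nvvm_atom_cta_add_gen_i(p, v)
  // becomes
  //   call i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0i32(i32* %p, i32 %v)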
13748 switch (BuiltinID) {
13749 case NVPTX::BI__nvvm_atom_add_gen_i:
13750 case NVPTX::BI__nvvm_atom_add_gen_l:
13751 case NVPTX::BI__nvvm_atom_add_gen_ll:
13752 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
13753
13754 case NVPTX::BI__nvvm_atom_sub_gen_i:
13755 case NVPTX::BI__nvvm_atom_sub_gen_l:
13756 case NVPTX::BI__nvvm_atom_sub_gen_ll:
13757 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
13758
13759 case NVPTX::BI__nvvm_atom_and_gen_i:
13760 case NVPTX::BI__nvvm_atom_and_gen_l:
13761 case NVPTX::BI__nvvm_atom_and_gen_ll:
13762 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
13763
13764 case NVPTX::BI__nvvm_atom_or_gen_i:
13765 case NVPTX::BI__nvvm_atom_or_gen_l:
13766 case NVPTX::BI__nvvm_atom_or_gen_ll:
13767 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
13768
13769 case NVPTX::BI__nvvm_atom_xor_gen_i:
13770 case NVPTX::BI__nvvm_atom_xor_gen_l:
13771 case NVPTX::BI__nvvm_atom_xor_gen_ll:
13772 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
13773
13774 case NVPTX::BI__nvvm_atom_xchg_gen_i:
13775 case NVPTX::BI__nvvm_atom_xchg_gen_l:
13776 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
13777 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
13778
13779 case NVPTX::BI__nvvm_atom_max_gen_i:
13780 case NVPTX::BI__nvvm_atom_max_gen_l:
13781 case NVPTX::BI__nvvm_atom_max_gen_ll:
13782 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
13783
13784 case NVPTX::BI__nvvm_atom_max_gen_ui:
13785 case NVPTX::BI__nvvm_atom_max_gen_ul:
13786 case NVPTX::BI__nvvm_atom_max_gen_ull:
13787 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
13788
13789 case NVPTX::BI__nvvm_atom_min_gen_i:
13790 case NVPTX::BI__nvvm_atom_min_gen_l:
13791 case NVPTX::BI__nvvm_atom_min_gen_ll:
13792 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
13793
13794 case NVPTX::BI__nvvm_atom_min_gen_ui:
13795 case NVPTX::BI__nvvm_atom_min_gen_ul:
13796 case NVPTX::BI__nvvm_atom_min_gen_ull:
13797 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
13798
13799 case NVPTX::BI__nvvm_atom_cas_gen_i:
13800 case NVPTX::BI__nvvm_atom_cas_gen_l:
13801 case NVPTX::BI__nvvm_atom_cas_gen_ll:
13802 // __nvvm_atom_cas_gen_* should return the old value rather than the
13803 // success flag.
13804 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
13805
13806 case NVPTX::BI__nvvm_atom_add_gen_f: {
13807 Value *Ptr = EmitScalarExpr(E->getArg(0));
13808 Value *Val = EmitScalarExpr(E->getArg(1));
    // atomicrmw only deals with integer arguments, so we need to use
    // LLVM's nvvm_atomic_load_add_f32 intrinsic instead.
13811 Function *FnALAF32 =
13812 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f32, Ptr->getType());
13813 return Builder.CreateCall(FnALAF32, {Ptr, Val});
13814 }
13815
13816 case NVPTX::BI__nvvm_atom_add_gen_d: {
13817 Value *Ptr = EmitScalarExpr(E->getArg(0));
13818 Value *Val = EmitScalarExpr(E->getArg(1));
13819 // atomicrmw only deals with integer arguments, so we need to use
13820 // LLVM's nvvm_atomic_load_add_f64 intrinsic.
13821 Function *FnALAF64 =
13822 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_add_f64, Ptr->getType());
13823 return Builder.CreateCall(FnALAF64, {Ptr, Val});
13824 }
13825
13826 case NVPTX::BI__nvvm_atom_inc_gen_ui: {
13827 Value *Ptr = EmitScalarExpr(E->getArg(0));
13828 Value *Val = EmitScalarExpr(E->getArg(1));
13829 Function *FnALI32 =
13830 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
13831 return Builder.CreateCall(FnALI32, {Ptr, Val});
13832 }
13833
13834 case NVPTX::BI__nvvm_atom_dec_gen_ui: {
13835 Value *Ptr = EmitScalarExpr(E->getArg(0));
13836 Value *Val = EmitScalarExpr(E->getArg(1));
13837 Function *FnALD32 =
13838 CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
13839 return Builder.CreateCall(FnALD32, {Ptr, Val});
13840 }
13841
13842 case NVPTX::BI__nvvm_ldg_c:
13843 case NVPTX::BI__nvvm_ldg_c2:
13844 case NVPTX::BI__nvvm_ldg_c4:
13845 case NVPTX::BI__nvvm_ldg_s:
13846 case NVPTX::BI__nvvm_ldg_s2:
13847 case NVPTX::BI__nvvm_ldg_s4:
13848 case NVPTX::BI__nvvm_ldg_i:
13849 case NVPTX::BI__nvvm_ldg_i2:
13850 case NVPTX::BI__nvvm_ldg_i4:
13851 case NVPTX::BI__nvvm_ldg_l:
13852 case NVPTX::BI__nvvm_ldg_ll:
13853 case NVPTX::BI__nvvm_ldg_ll2:
13854 case NVPTX::BI__nvvm_ldg_uc:
13855 case NVPTX::BI__nvvm_ldg_uc2:
13856 case NVPTX::BI__nvvm_ldg_uc4:
13857 case NVPTX::BI__nvvm_ldg_us:
13858 case NVPTX::BI__nvvm_ldg_us2:
13859 case NVPTX::BI__nvvm_ldg_us4:
13860 case NVPTX::BI__nvvm_ldg_ui:
13861 case NVPTX::BI__nvvm_ldg_ui2:
13862 case NVPTX::BI__nvvm_ldg_ui4:
13863 case NVPTX::BI__nvvm_ldg_ul:
13864 case NVPTX::BI__nvvm_ldg_ull:
13865 case NVPTX::BI__nvvm_ldg_ull2:
13866 // PTX Interoperability section 2.2: "For a vector with an even number of
13867 // elements, its alignment is set to number of elements times the alignment
13868 // of its member: n*alignof(t)."
13869 return MakeLdg(Intrinsic::nvvm_ldg_global_i);
13870 case NVPTX::BI__nvvm_ldg_f:
13871 case NVPTX::BI__nvvm_ldg_f2:
13872 case NVPTX::BI__nvvm_ldg_f4:
13873 case NVPTX::BI__nvvm_ldg_d:
13874 case NVPTX::BI__nvvm_ldg_d2:
13875 return MakeLdg(Intrinsic::nvvm_ldg_global_f);
13876
13877 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
13878 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
13879 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
13880 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
13881 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
13882 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
13883 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
13884 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
13885 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
13886 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
13887 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
13888 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
13889 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
13890 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
13891 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
13892 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
13893 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
13894 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
13895 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
13896 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
13897 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
13898 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
13899 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
13900 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
13901 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
13902 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
13903 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
13904 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
13905 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
13906 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
13907 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
13908 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
13909 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
13910 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
13911 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
13912 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
13913 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
13914 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
13915 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
13916 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
13917 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
13918 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
13919 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
13920 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
13921 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
13922 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
13923 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
13924 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
13925 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
13926 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
13927 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
13928 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
13929 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
13930 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
13931 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
13932 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
13933 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
13934 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
13935 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
13936 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
13937 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
13938 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
13939 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
13940 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
13941 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
13942 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
13943 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
13944 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
13945 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
13946 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
13947 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
13948 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
13949 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
13950 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
13951 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
13952 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
13953 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
13954 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
13955 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
13956 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
13957 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
13958 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
13959 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
13960 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
13961 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
13962 Value *Ptr = EmitScalarExpr(E->getArg(0));
13963 return Builder.CreateCall(
13964 CGM.getIntrinsic(
13965 Intrinsic::nvvm_atomic_cas_gen_i_cta,
13966 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
13967 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
13968 }
13969 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
13970 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
13971 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
13972 Value *Ptr = EmitScalarExpr(E->getArg(0));
13973 return Builder.CreateCall(
13974 CGM.getIntrinsic(
13975 Intrinsic::nvvm_atomic_cas_gen_i_sys,
13976 {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
13977 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
13978 }
13979 case NVPTX::BI__nvvm_match_all_sync_i32p:
13980 case NVPTX::BI__nvvm_match_all_sync_i64p: {
13981 Value *Mask = EmitScalarExpr(E->getArg(0));
13982 Value *Val = EmitScalarExpr(E->getArg(1));
13983 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
13984 Value *ResultPair = Builder.CreateCall(
13985 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
13986 ? Intrinsic::nvvm_match_all_sync_i32p
13987 : Intrinsic::nvvm_match_all_sync_i64p),
13988 {Mask, Val});
13989 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
13990 PredOutPtr.getElementType());
13991 Builder.CreateStore(Pred, PredOutPtr);
13992 return Builder.CreateExtractValue(ResultPair, 0);
13993 }
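  // A sketch of the lowering above, for the i32 variant:
  //   %pair = call { i32, i1 } @llvm.nvvm.match.all.sync.i32p(i32 %mask,
  //                                                           i32 %val)
  // The i1 predicate (element 1) is zero-extended and stored through the
  // third argument; the matched value (element 0) is the builtin's result.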
13994
13995 // FP MMA loads
13996 case NVPTX::BI__hmma_m16n16k16_ld_a:
13997 case NVPTX::BI__hmma_m16n16k16_ld_b:
13998 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
13999 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
14000 case NVPTX::BI__hmma_m32n8k16_ld_a:
14001 case NVPTX::BI__hmma_m32n8k16_ld_b:
14002 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
14003 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
14004 case NVPTX::BI__hmma_m8n32k16_ld_a:
14005 case NVPTX::BI__hmma_m8n32k16_ld_b:
14006 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
14007 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
14008 // Integer MMA loads.
14009 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
14010 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
14011 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
14012 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
14013 case NVPTX::BI__imma_m16n16k16_ld_c:
14014 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
14015 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
14016 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
14017 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
14018 case NVPTX::BI__imma_m32n8k16_ld_c:
14019 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
14020 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
14021 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
14022 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
14023 case NVPTX::BI__imma_m8n32k16_ld_c:
14024 // Sub-integer MMA loads.
14025 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
14026 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
14027 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
14028 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
14029 case NVPTX::BI__imma_m8n8k32_ld_c:
14030 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
14031 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
  case NVPTX::BI__bmma_m8n8k128_ld_c: {
14034 Address Dst = EmitPointerWithAlignment(E->getArg(0));
14035 Value *Src = EmitScalarExpr(E->getArg(1));
14036 Value *Ldm = EmitScalarExpr(E->getArg(2));
14037 llvm::APSInt isColMajorArg;
14038 if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
14039 return nullptr;
14040 bool isColMajor = isColMajorArg.getSExtValue();
14041 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
14042 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
14043 if (IID == 0)
14044 return nullptr;
14045
14046 Value *Result =
14047 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
14048
14049 // Save returned values.
14050 assert(II.NumResults);
14051 if (II.NumResults == 1) {
14052 Builder.CreateAlignedStore(Result, Dst.getPointer(),
14053 CharUnits::fromQuantity(4));
14054 } else {
14055 for (unsigned i = 0; i < II.NumResults; ++i) {
14056 Builder.CreateAlignedStore(
14057 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
14058 Dst.getElementType()),
14059 Builder.CreateGEP(Dst.getPointer(),
14060 llvm::ConstantInt::get(IntTy, i)),
14061 CharUnits::fromQuantity(4));
14062 }
14063 }
14064 return Result;
14065 }
14066
14067 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
14068 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
14069 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
14070 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
14071 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
14072 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
14073 case NVPTX::BI__imma_m16n16k16_st_c_i32:
14074 case NVPTX::BI__imma_m32n8k16_st_c_i32:
14075 case NVPTX::BI__imma_m8n32k16_st_c_i32:
14076 case NVPTX::BI__imma_m8n8k32_st_c_i32:
14077 case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
14078 Value *Dst = EmitScalarExpr(E->getArg(0));
14079 Address Src = EmitPointerWithAlignment(E->getArg(1));
14080 Value *Ldm = EmitScalarExpr(E->getArg(2));
14081 llvm::APSInt isColMajorArg;
14082 if (!E->getArg(3)->isIntegerConstantExpr(isColMajorArg, getContext()))
14083 return nullptr;
14084 bool isColMajor = isColMajorArg.getSExtValue();
14085 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
14086 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
14087 if (IID == 0)
14088 return nullptr;
14089 Function *Intrinsic =
14090 CGM.getIntrinsic(IID, Dst->getType());
14091 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
14092 SmallVector<Value *, 10> Values = {Dst};
14093 for (unsigned i = 0; i < II.NumResults; ++i) {
14094 Value *V = Builder.CreateAlignedLoad(
14095 Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
14096 CharUnits::fromQuantity(4));
14097 Values.push_back(Builder.CreateBitCast(V, ParamType));
14098 }
14099 Values.push_back(Ldm);
14100 Value *Result = Builder.CreateCall(Intrinsic, Values);
14101 return Result;
14102 }
14103
  // BI__hmma_m16n16k16_mma_<DType><CType>(d, a, b, c, layout, satf) -->
  // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
14106 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
14107 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
14108 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
14109 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
14110 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
14111 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
14112 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
14113 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
14114 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
14115 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
14116 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
14117 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
14118 case NVPTX::BI__imma_m16n16k16_mma_s8:
14119 case NVPTX::BI__imma_m16n16k16_mma_u8:
14120 case NVPTX::BI__imma_m32n8k16_mma_s8:
14121 case NVPTX::BI__imma_m32n8k16_mma_u8:
14122 case NVPTX::BI__imma_m8n32k16_mma_s8:
14123 case NVPTX::BI__imma_m8n32k16_mma_u8:
14124 case NVPTX::BI__imma_m8n8k32_mma_s4:
14125 case NVPTX::BI__imma_m8n8k32_mma_u4:
14126 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
14127 Address Dst = EmitPointerWithAlignment(E->getArg(0));
14128 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
14129 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
14130 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
14131 llvm::APSInt LayoutArg;
14132 if (!E->getArg(4)->isIntegerConstantExpr(LayoutArg, getContext()))
14133 return nullptr;
14134 int Layout = LayoutArg.getSExtValue();
14135 if (Layout < 0 || Layout > 3)
14136 return nullptr;
14137 llvm::APSInt SatfArg;
14138 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
      SatfArg = 0; // .b1 does not have a satf argument.
14140 else if (!E->getArg(5)->isIntegerConstantExpr(SatfArg, getContext()))
14141 return nullptr;
14142 bool Satf = SatfArg.getSExtValue();
14143 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
14144 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
14145 if (IID == 0) // Unsupported combination of Layout/Satf.
14146 return nullptr;
14147
14148 SmallVector<Value *, 24> Values;
14149 Function *Intrinsic = CGM.getIntrinsic(IID);
14150 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
14151 // Load A
14152 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
14153 Value *V = Builder.CreateAlignedLoad(
14154 Builder.CreateGEP(SrcA.getPointer(),
14155 llvm::ConstantInt::get(IntTy, i)),
14156 CharUnits::fromQuantity(4));
14157 Values.push_back(Builder.CreateBitCast(V, AType));
14158 }
14159 // Load B
14160 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
14161 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
14162 Value *V = Builder.CreateAlignedLoad(
14163 Builder.CreateGEP(SrcB.getPointer(),
14164 llvm::ConstantInt::get(IntTy, i)),
14165 CharUnits::fromQuantity(4));
14166 Values.push_back(Builder.CreateBitCast(V, BType));
14167 }
14168 // Load C
14169 llvm::Type *CType =
14170 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
14171 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
14172 Value *V = Builder.CreateAlignedLoad(
14173 Builder.CreateGEP(SrcC.getPointer(),
14174 llvm::ConstantInt::get(IntTy, i)),
14175 CharUnits::fromQuantity(4));
14176 Values.push_back(Builder.CreateBitCast(V, CType));
14177 }
14178 Value *Result = Builder.CreateCall(Intrinsic, Values);
14179 llvm::Type *DType = Dst.getElementType();
14180 for (unsigned i = 0; i < MI.NumEltsD; ++i)
14181 Builder.CreateAlignedStore(
14182 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
14183 Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
14184 CharUnits::fromQuantity(4));
14185 return Result;
14186 }
14187 default:
14188 return nullptr;
14189 }
14190}
14191
14192struct BuiltinAlignArgs {
14193 llvm::Value *Src = nullptr;
14194 llvm::Value *SrcAddr = nullptr;
14195 llvm::Value *SrcAsI8Cap = nullptr;
14196 llvm::Value *Alignment = nullptr;
14197 llvm::Value *Mask = nullptr;
14198 bool IsCheri = false;
14199
14200 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF, bool PowerOfTwo) {
14201 QualType AstType = E->getArg(0)->getType();
14202 if (AstType->isArrayType()) {
14203 AstType = CGF.getContext().getDecayedType(AstType);
14204 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
14205 } else {
14206 Src = CGF.EmitScalarExpr(E->getArg(0));
14207 }
14208 IsCheri = AstType->isCHERICapabilityType(CGF.CGM.getContext());
14209 llvm::IntegerType *IntType = IntegerType::get(
14210 CGF.getLLVMContext(), CGF.getContext().getIntRange(AstType));
14211
    // We need to convert the source to an integer in order to perform the
    // masking. For CHERI we instead need to get the virtual address and add
    // the difference, since masking only works on the offset.
14215 if (IsCheri) {
14216 Value *Callee = CGF.CGM.getIntrinsic(Intrinsic::cheri_cap_address_get, CGF.IntPtrTy);
14217 SrcAsI8Cap = CGF.Builder.CreateBitCast(Src, CGF.CGM.Int8CheriCapTy);
14218 SrcAddr = CGF.Builder.CreateCall(Callee, {SrcAsI8Cap});
14219 } else {
14220 SrcAddr = CGF.Builder.CreateBitOrPointerCast(Src, IntType);
14221 }
14222 auto *One = llvm::ConstantInt::get(IntType, 1);
14223 Alignment = CGF.EmitScalarExpr(E->getArg(1));
    // Ensure that we also handle __uintcap_t values as the alignment
    // parameter.
14225 if (E->getArg(1)->getType()->isIntCapType()) {
14226 Value *Callee = CGF.CGM.getIntrinsic(Intrinsic::cheri_cap_address_get, CGF.IntPtrTy);
14227 Alignment = CGF.Builder.CreateCall(Callee, {Alignment});
14228 }
14229
14230 Alignment = CGF.Builder.CreateZExtOrTrunc(
14231 Alignment, IntType, PowerOfTwo ? "pow2" : "alignment");
14232 if (PowerOfTwo) {
14233 Alignment = CGF.Builder.CreateShl(One, Alignment, "alignment");
14234 }
14235 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
14236 }
14237};
14238
14239/// Generate (x & (y-1)) == 0
14240RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E,
14241 bool PowerOfTwo) {
14242 BuiltinAlignArgs Args(E, *this, PowerOfTwo);
14243 return RValue::get(Builder.CreateICmpEQ(
14244 Builder.CreateAnd(Args.SrcAddr, Args.Mask, "set_bits"),
14245 llvm::Constant::getNullValue(Args.SrcAddr->getType()), "is_aligned"));
14246}
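
// For example, on a non-CHERI target __builtin_is_aligned(p, 16) emits
// (a sketch, assuming a 64-bit address):
//   %set_bits   = and i64 %addr, 15
//   %is_aligned = icmp eq i64 %set_bits, 0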
14247
/// Generate (x & ~(y-1)) to align down, or ((x + (y - 1)) & ~(y - 1)) to
/// align up. Note: for capability types we can't do the bitwise operations
/// directly but instead need to add/subtract the difference to/from the
/// pointer. For capabilities we do x - (x & y) for down and x + (y - (x & y))
/// for up.
14252RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool PowerOfTwo,
14253 bool AlignUp) {
  // FIXME: this needs to use minus/plus for CHERI rather than masking.
14255 BuiltinAlignArgs Args(E, *this, PowerOfTwo);
14256 if (AlignUp) {
14257 // When aligning up we have to first add the mask to ensure we go over the
14258 // next alignment value and then align down to the next valid multiple
14259 // By adding the mask first, we ensure that align_up on an already aligned
14260 // value will not be changed.
14261 Args.SrcAddr = Builder.CreateAdd(Args.SrcAddr, Args.Mask, "over_boundary");
14262 }
  // When not targeting CHERI we can just use bitwise masking and cast the
  // result back to a pointer.
14265 auto *Ret = Builder.CreateAnd(Args.SrcAddr,
14266 Builder.CreateNot(Args.Mask, "negated_mask"));
14267 if (Args.IsCheri) {
    // For CHERI capabilities we need to call CSetAddr to update the target
    // address. If the backend supports an AND instruction on capabilities, it
    // should use a pattern to convert this to an AND instead of a setaddr.
14271 Value *Callee = CGM.getIntrinsic(Intrinsic::cheri_cap_address_set, CGM.PtrDiffTy);
14272 Ret = Builder.CreateCall(Callee, {Args.SrcAsI8Cap, Builder.CreateBitCast(Ret, CGM.Int64Ty)});
14273 }
14274 return RValue::get(Builder.CreateBitOrPointerCast(Ret, Args.Src->getType(),
14275 "aligned_result"));
14276}
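
// For example, on a non-CHERI target __builtin_align_up(p, 16) emits
// (a sketch, assuming a 64-bit address):
//   %over_boundary = add i64 %addr, 15
//   %aligned       = and i64 %over_boundary, -16   ; -16 == ~15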
14277
14278Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
14279 const CallExpr *E) {
14280 switch (BuiltinID) {
14281 case WebAssembly::BI__builtin_wasm_memory_size: {
14282 llvm::Type *ResultType = ConvertType(E->getType());
14283 Value *I = EmitScalarExpr(E->getArg(0));
14284 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
14285 return Builder.CreateCall(Callee, I);
14286 }
14287 case WebAssembly::BI__builtin_wasm_memory_grow: {
14288 llvm::Type *ResultType = ConvertType(E->getType());
14289 Value *Args[] = {
14290 EmitScalarExpr(E->getArg(0)),
14291 EmitScalarExpr(E->getArg(1))
14292 };
14293 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
14294 return Builder.CreateCall(Callee, Args);
14295 }
14296 case WebAssembly::BI__builtin_wasm_memory_init: {
14297 llvm::APSInt SegConst;
14298 if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
14299 llvm_unreachable("Constant arg isn't actually constant?");
14300 llvm::APSInt MemConst;
14301 if (!E->getArg(1)->isIntegerConstantExpr(MemConst, getContext()))
14302 llvm_unreachable("Constant arg isn't actually constant?");
14303 if (!MemConst.isNullValue())
14304 ErrorUnsupported(E, "non-zero memory index");
14305 Value *Args[] = {llvm::ConstantInt::get(getLLVMContext(), SegConst),
14306 llvm::ConstantInt::get(getLLVMContext(), MemConst),
14307 EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)),
14308 EmitScalarExpr(E->getArg(4))};
14309 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_init);
14310 return Builder.CreateCall(Callee, Args);
14311 }
14312 case WebAssembly::BI__builtin_wasm_data_drop: {
14313 llvm::APSInt SegConst;
14314 if (!E->getArg(0)->isIntegerConstantExpr(SegConst, getContext()))
14315 llvm_unreachable("Constant arg isn't actually constant?");
14316 Value *Arg = llvm::ConstantInt::get(getLLVMContext(), SegConst);
14317 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_data_drop);
14318 return Builder.CreateCall(Callee, {Arg});
14319 }
14320 case WebAssembly::BI__builtin_wasm_throw: {
14321 Value *Tag = EmitScalarExpr(E->getArg(0));
14322 Value *Obj = EmitScalarExpr(E->getArg(1));
14323 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
14324 return Builder.CreateCall(Callee, {Tag, Obj});
14325 }
14326 case WebAssembly::BI__builtin_wasm_rethrow_in_catch: {
14327 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow_in_catch);
14328 return Builder.CreateCall(Callee);
14329 }
14330 case WebAssembly::BI__builtin_wasm_atomic_wait_i32: {
14331 Value *Addr = EmitScalarExpr(E->getArg(0));
14332 Value *Expected = EmitScalarExpr(E->getArg(1));
14333 Value *Timeout = EmitScalarExpr(E->getArg(2));
14334 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i32);
14335 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
14336 }
14337 case WebAssembly::BI__builtin_wasm_atomic_wait_i64: {
14338 Value *Addr = EmitScalarExpr(E->getArg(0));
14339 Value *Expected = EmitScalarExpr(E->getArg(1));
14340 Value *Timeout = EmitScalarExpr(E->getArg(2));
14341 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_wait_i64);
14342 return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
14343 }
14344 case WebAssembly::BI__builtin_wasm_atomic_notify: {
14345 Value *Addr = EmitScalarExpr(E->getArg(0));
14346 Value *Count = EmitScalarExpr(E->getArg(1));
14347 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_atomic_notify);
14348 return Builder.CreateCall(Callee, {Addr, Count});
14349 }
14350 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
14351 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
14352 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
14353 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
14354 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4:
14355 case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64x2_f64x2: {
14356 Value *Src = EmitScalarExpr(E->getArg(0));
14357 llvm::Type *ResT = ConvertType(E->getType());
14358 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
14359 {ResT, Src->getType()});
14360 return Builder.CreateCall(Callee, {Src});
14361 }
14362 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
14363 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
14364 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
14365 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
14366 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4:
14367 case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64x2_f64x2: {
14368 Value *Src = EmitScalarExpr(E->getArg(0));
14369 llvm::Type *ResT = ConvertType(E->getType());
14370 Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
14371 {ResT, Src->getType()});
14372 return Builder.CreateCall(Callee, {Src});
14373 }
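  // E.g. (a sketch; the mangled suffix is illustrative)
  //   __builtin_wasm_trunc_saturate_s_i32_f32(F)
  // becomes
  //   call i32 @llvm.wasm.trunc.saturate.signed.i32.f32(float %F)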
14374 case WebAssembly::BI__builtin_wasm_min_f32:
14375 case WebAssembly::BI__builtin_wasm_min_f64:
14376 case WebAssembly::BI__builtin_wasm_min_f32x4:
14377 case WebAssembly::BI__builtin_wasm_min_f64x2: {
14378 Value *LHS = EmitScalarExpr(E->getArg(0));
14379 Value *RHS = EmitScalarExpr(E->getArg(1));
14380 Function *Callee = CGM.getIntrinsic(Intrinsic::minimum,
14381 ConvertType(E->getType()));
14382 return Builder.CreateCall(Callee, {LHS, RHS});
14383 }
14384 case WebAssembly::BI__builtin_wasm_max_f32:
14385 case WebAssembly::BI__builtin_wasm_max_f64:
14386 case WebAssembly::BI__builtin_wasm_max_f32x4:
14387 case WebAssembly::BI__builtin_wasm_max_f64x2: {
14388 Value *LHS = EmitScalarExpr(E->getArg(0));
14389 Value *RHS = EmitScalarExpr(E->getArg(1));
14390 Function *Callee = CGM.getIntrinsic(Intrinsic::maximum,
14391 ConvertType(E->getType()));
14392 return Builder.CreateCall(Callee, {LHS, RHS});
14393 }
14394 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
14395 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
14396 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
14397 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
14398 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
14399 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
14400 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
14401 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
14402 llvm::APSInt LaneConst;
14403 if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
14404 llvm_unreachable("Constant arg isn't actually constant?");
14405 Value *Vec = EmitScalarExpr(E->getArg(0));
14406 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
14407 Value *Extract = Builder.CreateExtractElement(Vec, Lane);
14408 switch (BuiltinID) {
14409 case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
14410 case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
14411 return Builder.CreateSExt(Extract, ConvertType(E->getType()));
14412 case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
14413 case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
14414 return Builder.CreateZExt(Extract, ConvertType(E->getType()));
14415 case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
14416 case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
14417 case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
14418 case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
14419 return Extract;
14420 default:
14421 llvm_unreachable("unexpected builtin ID");
14422 }
14423 }
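  // E.g. __builtin_wasm_extract_lane_s_i8x16(V, 7) emits an extractelement
  // of lane 7 followed by a sext of the i8 element to the builtin's i32
  // result type.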
14424 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
14425 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
14426 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
14427 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
14428 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
14429 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
14430 llvm::APSInt LaneConst;
14431 if (!E->getArg(1)->isIntegerConstantExpr(LaneConst, getContext()))
14432 llvm_unreachable("Constant arg isn't actually constant?");
14433 Value *Vec = EmitScalarExpr(E->getArg(0));
14434 Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
14435 Value *Val = EmitScalarExpr(E->getArg(2));
14436 switch (BuiltinID) {
14437 case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
14438 case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
14439 llvm::Type *ElemType = ConvertType(E->getType())->getVectorElementType();
14440 Value *Trunc = Builder.CreateTrunc(Val, ElemType);
14441 return Builder.CreateInsertElement(Vec, Trunc, Lane);
14442 }
14443 case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
14444 case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
14445 case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
14446 case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
14447 return Builder.CreateInsertElement(Vec, Val, Lane);
14448 default:
14449 llvm_unreachable("unexpected builtin ID");
14450 }
14451 }
14452 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
14453 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
14454 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
14455 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
14456 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
14457 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
14458 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
14459 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
14460 unsigned IntNo;
14461 switch (BuiltinID) {
14462 case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
14463 case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
14464 IntNo = Intrinsic::sadd_sat;
14465 break;
14466 case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
14467 case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
14468 IntNo = Intrinsic::uadd_sat;
14469 break;
14470 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
14471 case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
14472 IntNo = Intrinsic::wasm_sub_saturate_signed;
14473 break;
14474 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
14475 case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
14476 IntNo = Intrinsic::wasm_sub_saturate_unsigned;
14477 break;
14478 default:
14479 llvm_unreachable("unexpected builtin ID");
14480 }
14481 Value *LHS = EmitScalarExpr(E->getArg(0));
14482 Value *RHS = EmitScalarExpr(E->getArg(1));
14483 Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
14484 return Builder.CreateCall(Callee, {LHS, RHS});
14485 }
  case WebAssembly::BI__builtin_wasm_bitselect: {
    Value *V1 = EmitScalarExpr(E->getArg(0));
    Value *V2 = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_bitselect,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {V1, V2, C});
  }
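  // any_true/all_true reduce a vector of lane predicates to a single i32
  // (0 or 1). Illustrative lowering (a sketch; value names are made up):
  //   __builtin_wasm_all_true_i32x4(v)
  //     %r = call i32 @llvm.wasm.alltrue.v4i32(<4 x i32> %v)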
  case WebAssembly::BI__builtin_wasm_any_true_i8x16:
  case WebAssembly::BI__builtin_wasm_any_true_i16x8:
  case WebAssembly::BI__builtin_wasm_any_true_i32x4:
  case WebAssembly::BI__builtin_wasm_any_true_i64x2:
  case WebAssembly::BI__builtin_wasm_all_true_i8x16:
  case WebAssembly::BI__builtin_wasm_all_true_i16x8:
  case WebAssembly::BI__builtin_wasm_all_true_i32x4:
  case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_any_true_i8x16:
    case WebAssembly::BI__builtin_wasm_any_true_i16x8:
    case WebAssembly::BI__builtin_wasm_any_true_i32x4:
    case WebAssembly::BI__builtin_wasm_any_true_i64x2:
      IntNo = Intrinsic::wasm_anytrue;
      break;
    case WebAssembly::BI__builtin_wasm_all_true_i8x16:
    case WebAssembly::BI__builtin_wasm_all_true_i16x8:
    case WebAssembly::BI__builtin_wasm_all_true_i32x4:
    case WebAssembly::BI__builtin_wasm_all_true_i64x2:
      IntNo = Intrinsic::wasm_alltrue;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
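  // abs and sqrt (below) also need nothing target-specific: they map onto the
  // generic llvm.fabs and llvm.sqrt intrinsics, overloaded on the vector
  // type, e.g. call <4 x float> @llvm.sqrt.v4f32(<4 x float> %v).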
  case WebAssembly::BI__builtin_wasm_abs_f32x4:
  case WebAssembly::BI__builtin_wasm_abs_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
  case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  SmallVector<llvm::Value *, 4> Ops;
  Intrinsic::ID ID = Intrinsic::not_intrinsic;

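  // Helper for the circular-addressing load builtins. Illustrative use and
  // lowering (a sketch; the exact prototypes come from the Hexagon builtin
  // definitions):
  //   int v = __builtin_HEXAGON_L2_loadri_pci(&ptr, /*Imm=*/4, Mod, Start);
  // emits a call to llvm.hexagon.L2.loadri.pci returning { i32, i8* }:
  // element 0 is the loaded value, and element 1, the post-incremented base,
  // is stored back through &ptr.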
  auto MakeCircLd = [&](unsigned IntID, bool HasImm) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address BP = EmitPointerWithAlignment(E->getArg(0));
    BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
                 BP.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // Operands are Base, Increment, Modifier, Start.
    if (HasImm)
      Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
              EmitScalarExpr(E->getArg(3)) };
    else
      Ops = { Base, EmitScalarExpr(E->getArg(1)),
              EmitScalarExpr(E->getArg(2)) };

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The intrinsic generates two results. The new value for the base pointer
    // needs to be stored back. Reuse the address emitted above rather than
    // re-evaluating the argument, which may have side effects.
    llvm::Value *NewBase = Builder.CreateExtractValue(Result, 1);
    llvm::Value *LV = Builder.CreateBitCast(BP.getPointer(),
                                            NewBase->getType()->getPointerTo());
    Builder.CreateAlignedStore(NewBase, LV, BP.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  };

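  // Helper for the circular-addressing store builtins. These work the same
  // way, except the intrinsic also takes the value to store and its only
  // result is the updated base pointer. Illustrative use (a sketch):
  //   __builtin_HEXAGON_S2_storeri_pci(&ptr, /*Imm=*/4, Mod, Val, Start);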
  auto MakeCircSt = [&](unsigned IntID, bool HasImm) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address BP = EmitPointerWithAlignment(E->getArg(0));
    BP = Address(Builder.CreateBitCast(BP.getPointer(), Int8PtrPtrTy),
                 BP.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // Operands are Base, Increment, Modifier, Value, Start.
    if (HasImm)
      Ops = { Base, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)),
              EmitScalarExpr(E->getArg(3)), EmitScalarExpr(E->getArg(4)) };
    else
      Ops = { Base, EmitScalarExpr(E->getArg(1)),
              EmitScalarExpr(E->getArg(2)), EmitScalarExpr(E->getArg(3)) };

    llvm::Value *NewBase = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The intrinsic generates one result, which is the new value for the base
    // pointer. It needs to be stored back. Reuse the address emitted above
    // rather than re-evaluating the argument, which may have side effects.
    llvm::Value *LV = Builder.CreateBitCast(BP.getPointer(),
                                            NewBase->getType()->getPointerTo());
    return Builder.CreateAlignedStore(NewBase, LV, BP.getAlignment());
  };

  // Handle the lowering of the bit-reverse load builtins to LLVM IR. The
  // intrinsic call emitted by this helper only reads from memory; the write
  // to memory is handled by the store instruction emitted below.
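  // Illustrative use and lowering (a sketch; the exact prototype comes from
  // the builtin definition):
  //   unsigned char d;
  //   void *nb = __builtin_brev_ldub(base, &d, mod);
  // emits llvm.hexagon.L2.loadrub.pbr returning { i32, i8* }: element 0 is
  // truncated and stored to d, and element 1, the updated base, is returned.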
  auto MakeBrevLd = [&](unsigned IntID, llvm::Type *DestTy) {
    // The intrinsic generates two results: the loaded value and the updated
    // base pointer. The loaded value is stored through the destination
    // pointer (the variable is passed by address), and the updated base
    // pointer is returned.
    llvm::Value *BaseAddress =
        Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);

    // Expressions like &(*pt++) have a side effect on every evaluation, and
    // both EmitPointerWithAlignment and EmitScalarExpr evaluate their
    // argument, so the destination expression must be emitted exactly once.
    Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
                       DestAddr.getAlignment());
    llvm::Value *DestAddress = DestAddr.getPointer();

    // The builtin's operands are Base, Dest, and Modifier, but only Base and
    // Modifier are passed to the intrinsic; its format in LLVM IR is
    // { ValueType, i8* } (i8*, i32).
    Ops = {BaseAddress, EmitScalarExpr(E->getArg(2))};

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The value needs to be stored as the variable is passed by reference.
    llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);

    // The stored value must be truncated to fit the destination type: i32 and
    // i64 are natively supported on Hexagon, but i8 and i16 need stores of
    // the respective narrower type.
    DestVal = Builder.CreateTrunc(DestVal, DestTy);

    llvm::Value *DestForStore =
        Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
    Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
    // The updated value of the base pointer is returned.
    return Builder.CreateExtractValue(Result, 1);
  };

  switch (BuiltinID) {
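  // V6 vaddcarry/vsubcarry: vector add/subtract with carry, where the carry
  // predicate is read from and written back through the third argument.
  // Illustrative use (a sketch; the HVX types come from the Hexagon headers):
  //   HVX_VectorPred q = ...;
  //   HVX_Vector r = __builtin_HEXAGON_V6_vaddcarry(a, b, &q);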
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
    // Pick the predicate width and intrinsic for each variant; the add and
    // sub cases are otherwise identical.
    unsigned Size;
    switch (BuiltinID) {
    case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
      Size = 512;
      ID = Intrinsic::hexagon_V6_vaddcarry;
      break;
    case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
      Size = 1024;
      ID = Intrinsic::hexagon_V6_vaddcarry_128B;
      break;
    case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
      Size = 512;
      ID = Intrinsic::hexagon_V6_vsubcarry;
      break;
    case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B:
      Size = 1024;
      ID = Intrinsic::hexagon_V6_vsubcarry_128B;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Address Dest = EmitPointerWithAlignment(E->getArg(2));
    Dest = Builder.CreateBitCast(Dest,
        llvm::VectorType::get(Builder.getInt1Ty(), Size)->getPointerTo(0));
    LoadInst *QLd = Builder.CreateLoad(Dest);
    Ops = { EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), QLd };
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
    // Store the updated predicate back through the address emitted above;
    // avoid re-evaluating the argument, which may have side effects.
    llvm::Value *Vprd = Builder.CreateExtractValue(Result, 1);
    Builder.CreateAlignedStore(Vprd, Dest.getPointer(), Dest.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadri_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrub_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrb_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadruh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadri_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
    return MakeCircLd(Intrinsic::hexagon_L2_loadrd_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerb_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerh_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerf_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storeri_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
    return MakeCircSt(Intrinsic::hexagon_S2_storerd_pci, /*HasImm*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerb_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerh_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerf_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storeri_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
    return MakeCircSt(Intrinsic::hexagon_S2_storerd_pcr, /*HasImm*/false);
  case Hexagon::BI__builtin_brev_ldub:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_ldb:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_lduh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldw:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
  case Hexagon::BI__builtin_brev_ldd:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);
  default:
    break;
  } // switch

  return nullptr;
}
